diff --git a/CHANGELOG.md b/CHANGELOG.md index c986668cd98..a946c89a516 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +Release v1.35.6 (2020-10-08) +=== + +### Service Client Updates +* `service/ce`: Updates service API and documentation +* `service/ec2`: Updates service API and documentation + * AWS EC2 RevokeSecurityGroupIngress and RevokeSecurityGroupEgress APIs will return IpPermissions which do not match with any existing IpPermissions for security groups in default VPC and EC2-Classic. +* `service/eventbridge`: Updates service API and documentation +* `service/events`: Updates service API and documentation + * Amazon EventBridge (formerly called CloudWatch Events) adds support for target Dead-letter Queues and custom retry policies. +* `service/rds`: Updates service API and documentation + * Supports a new parameter to set the max allocated storage in gigabytes for restore database instance from S3 and restore database instance to a point in time APIs. +* `service/rekognition`: Updates service API and documentation + * This release provides location information for the manifest validation files. +* `service/sagemaker`: Updates service API and documentation + * This release enables Sagemaker customers to convert Tensorflow and PyTorch models to CoreML (ML Model) format. +* `service/sns`: Updates service documentation + * Documentation updates for SNS. + Release v1.35.5 (2020-10-07) === diff --git a/aws/version.go b/aws/version.go index 3d79d3a386e..46a3dab5c0d 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.5" +const SDKVersion = "1.35.6" diff --git a/models/apis/ce/2017-10-25/api-2.json b/models/apis/ce/2017-10-25/api-2.json index ea180183554..8b74e54b7c3 100644 --- a/models/apis/ce/2017-10-25/api-2.json +++ b/models/apis/ce/2017-10-25/api-2.json @@ -568,7 +568,8 @@ "EffectiveEnd":{"shape":"ZonedDateTime"}, "Name":{"shape":"CostCategoryName"}, "RuleVersion":{"shape":"CostCategoryRuleVersion"}, - "Rules":{"shape":"CostCategoryRulesList"} + "Rules":{"shape":"CostCategoryRulesList"}, + "ProcessingStatus":{"shape":"CostCategoryProcessingStatusList"} } }, "CostCategoryMaxResults":{ @@ -582,6 +583,17 @@ "min":1, "pattern":"^(?! )[\\p{L}\\p{N}\\p{Z}-_]*(? The list of processing statuses for Cost Management products for a specific cost category.

", + "refs": { + "CostCategoryProcessingStatusList$member": null + } + }, + "CostCategoryProcessingStatusList": { + "base": null, + "refs": { + "CostCategory$ProcessingStatus": "

The list of processing statuses for Cost Management products for a specific cost category.

", + "CostCategoryReference$ProcessingStatus": "

The list of processing statuses for Cost Management products for a specific cost category.

" + } + }, "CostCategoryReference": { "base": "

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

", "refs": { @@ -220,10 +233,23 @@ "UpdateCostCategoryDefinitionRequest$Rules": "

The Expression object used to categorize costs. For more information, see CostCategoryRule .

" } }, + "CostCategoryStatus": { + "base": null, + "refs": { + "CostCategoryProcessingStatus$Status": "

The process status for a specific cost category.

" + } + }, + "CostCategoryStatusComponent": { + "base": null, + "refs": { + "CostCategoryProcessingStatus$Component": "

The Cost Management product name of the applied status.
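The new ProcessingStatus member reports, per Cost Management product (Component), whether a cost category has finished being applied. A minimal sketch of reading it with this SDK, assuming v1.35.6 or later; the cost category ARN below is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/costexplorer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := costexplorer.New(sess)

	// The ARN below is a placeholder; substitute a real cost category ARN.
	out, err := svc.DescribeCostCategoryDefinition(&costexplorer.DescribeCostCategoryDefinitionInput{
		CostCategoryArn: aws.String("arn:aws:ce::123456789012:costcategory/example"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each entry reports the Cost Management product (Component) and its Status.
	for _, ps := range out.CostCategory.ProcessingStatus {
		fmt.Printf("component=%s status=%s\n", aws.StringValue(ps.Component), aws.StringValue(ps.Status))
	}
}
```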

" + } + }, "CostCategoryValue": { "base": "

The value a line item will be categorized as, if it matches the rule.

", "refs": { - "CostCategoryRule$Value": null + "CostCategoryRule$Value": null, + "CostCategoryValuesList$member": null } }, "CostCategoryValues": { @@ -232,6 +258,12 @@ "Expression$CostCategories": "

The filter based on CostCategory values.

" } }, + "CostCategoryValuesList": { + "base": null, + "refs": { + "CostCategoryReference$Values": "

A list of unique cost category values in a specific cost category.

" + } + }, "Coverage": { "base": "

The amount of instance usage that a reservation covered.

", "refs": { @@ -999,6 +1031,7 @@ "MatchOptions": { "base": null, "refs": { + "CostCategoryValues$MatchOptions": "

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

", "DimensionValues$MatchOptions": "

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

", "TagValues$MatchOptions": "

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.
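With this release, CostCategoryValues gains a MatchOptions member, so cost category filters can be matched the same way as dimension and tag filters. A minimal sketch of a GetCostAndUsage call that uses it, assuming v1.35.6 or later; the cost category key ("Team") and value ("backend") are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/costexplorer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := costexplorer.New(sess)

	out, err := svc.GetCostAndUsage(&costexplorer.GetCostAndUsageInput{
		TimePeriod: &costexplorer.DateInterval{
			Start: aws.String("2020-09-01"),
			End:   aws.String("2020-10-01"),
		},
		Granularity: aws.String("MONTHLY"),
		Metrics:     []*string{aws.String("UnblendedCost")},
		Filter: &costexplorer.Expression{
			CostCategories: &costexplorer.CostCategoryValues{
				Key:    aws.String("Team"),
				Values: []*string{aws.String("backend")},
				// MatchOptions is new for cost category filters in this release.
				MatchOptions: []*string{aws.String("EQUALS"), aws.String("CASE_SENSITIVE")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.ResultsByTime {
		if m, ok := r.Total["UnblendedCost"]; ok {
			fmt.Println(aws.StringValue(r.TimePeriod.Start), aws.StringValue(m.Amount), aws.StringValue(m.Unit))
		}
	}
}
```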

" } diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 5ad5b457323..253075b0602 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -3571,7 +3571,8 @@ "method":"POST", "requestUri":"/" }, - "input":{"shape":"RevokeSecurityGroupEgressRequest"} + "input":{"shape":"RevokeSecurityGroupEgressRequest"}, + "output":{"shape":"RevokeSecurityGroupEgressResult"} }, "RevokeSecurityGroupIngress":{ "name":"RevokeSecurityGroupIngress", @@ -3579,7 +3580,8 @@ "method":"POST", "requestUri":"/" }, - "input":{"shape":"RevokeSecurityGroupIngressRequest"} + "input":{"shape":"RevokeSecurityGroupIngressRequest"}, + "output":{"shape":"RevokeSecurityGroupIngressResult"} }, "RunInstances":{ "name":"RunInstances", @@ -25460,6 +25462,19 @@ } } }, + "RevokeSecurityGroupEgressResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + }, + "UnknownIpPermissions":{ + "shape":"IpPermissionList", + "locationName":"unknownIpPermissionSet" + } + } + }, "RevokeSecurityGroupIngressRequest":{ "type":"structure", "members":{ @@ -25478,6 +25493,19 @@ } } }, + "RevokeSecurityGroupIngressResult":{ + "type":"structure", + "members":{ + "Return":{ + "shape":"Boolean", + "locationName":"return" + }, + "UnknownIpPermissions":{ + "shape":"IpPermissionList", + "locationName":"unknownIpPermissionSet" + } + } + }, "RootDeviceType":{ "type":"string", "enum":[ diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 808917fa2ea..360e4070b20 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -403,8 +403,8 @@ "RestoreAddressToClassic": "

Restores an Elastic IP address that was previously moved to the EC2-VPC platform back to the EC2-Classic platform. You cannot move an Elastic IP address that was originally allocated for use in EC2-VPC. The Elastic IP address must not be associated with an instance or network interface.

", "RestoreManagedPrefixListVersion": "

Restores the entries from a previous version of a managed prefix list to a new version of the prefix list.

", "RevokeClientVpnIngress": "

Removes an ingress authorization rule from a Client VPN endpoint.

", - "RevokeSecurityGroupEgress": "

[VPC only] Removes the specified egress rules from a security group for EC2-VPC. This action doesn't apply to security groups for use in EC2-Classic. To remove a rule, the values that you specify (for example, ports) must match the existing rule's values exactly.

Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not have to specify the description to revoke the rule.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

", - "RevokeSecurityGroupIngress": "

Removes the specified ingress rules from a security group. To remove a rule, the values that you specify (for example, ports) must match the existing rule's values exactly.

[EC2-Classic only] If the values you specify do not match the existing rule's values, no error is returned. Use DescribeSecurityGroups to verify that the rule has been removed.

Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not have to specify the description to revoke the rule.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

", + "RevokeSecurityGroupEgress": "

[VPC only] Removes the specified egress rules from a security group for EC2-VPC. This action does not apply to security groups for use in EC2-Classic. To remove a rule, the values that you specify (for example, ports) must match the existing rule's values exactly.

[Default VPC] If the values you specify do not match the existing rule's values, no error is returned, and the output describes the security group rules that were not revoked.

AWS recommends that you use DescribeSecurityGroups to verify that the rule has been removed.

Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not have to specify the description to revoke the rule.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.

", + "RevokeSecurityGroupIngress": "

Removes the specified ingress rules from a security group. To remove a rule, the values that you specify (for example, ports) must match the existing rule's values exactly.

[EC2-Classic, default VPC] If the values you specify do not match the existing rule's values, no error is returned, and the output describes the security group rules that were not revoked.

AWS recommends that you use DescribeSecurityGroups to verify that the rule has been removed.

Each rule consists of the protocol and the CIDR range or source security group. For the TCP and UDP protocols, you must also specify the destination port or range of ports. For the ICMP protocol, you must also specify the ICMP type and code. If the security group rule has a description, you do not have to specify the description to revoke the rule.

Rule changes are propagated to instances within the security group as quickly as possible. However, a small delay might occur.
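Because RevokeSecurityGroupIngress and RevokeSecurityGroupEgress now have output shapes, callers can inspect the rules the service could not match. A minimal sketch with this SDK, assuming v1.35.6 or later; the security group ID and CIDR below are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess)

	// The group ID and CIDR are placeholders.
	out, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
		GroupId: aws.String("sg-0123456789abcdef0"),
		IpPermissions: []*ec2.IpPermission{{
			IpProtocol: aws.String("tcp"),
			FromPort:   aws.Int64(22),
			ToPort:     aws.Int64(22),
			IpRanges:   []*ec2.IpRange{{CidrIp: aws.String("203.0.113.0/24")}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// For security groups in a default VPC (or EC2-Classic), rules that did not
	// match any existing rule are echoed back instead of raising an error.
	for _, p := range out.UnknownIpPermissions {
		fmt.Println("not revoked:", p.String())
	}
}
```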

", "RunInstances": "

Launches the specified number of instances using an AMI for which you have permissions.

You can specify a number of options, or leave the default options. The following rules apply:

You can create a launch template, which is a resource that contains the parameters to launch an instance. When you launch an instance using RunInstances, you can specify the launch template instead of specifying the launch parameters.

To ensure faster instance launches, break up large requests into smaller batches. For example, create five separate launch requests for 100 instances each instead of one launch request for 500 instances.

An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances. You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging your Amazon EC2 resources.

Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. For more information, see Key pairs in the Amazon Elastic Compute Cloud User Guide.

For troubleshooting, see What to do if an instance immediately terminates, and Troubleshooting connecting to your instance in the Amazon Elastic Compute Cloud User Guide.

", "RunScheduledInstances": "

Launches the specified Scheduled Instances.

Before you can launch a Scheduled Instance, you must purchase it and obtain an identifier using PurchaseScheduledInstances.

You must launch a Scheduled Instance during its scheduled time period. You can't stop or reboot a Scheduled Instance, but you can terminate it as needed. If you terminate a Scheduled Instance before the current scheduled time period ends, you can launch it again after a few minutes. For more information, see Scheduled Instances in the Amazon Elastic Compute Cloud User Guide.

", "SearchLocalGatewayRoutes": "

Searches for routes in the specified local gateway route table.

", @@ -1631,7 +1631,9 @@ "RevokeClientVpnIngressRequest$RevokeAllGroups": "

Indicates whether access should be revoked for all clients.

", "RevokeClientVpnIngressRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "RevokeSecurityGroupEgressRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "RevokeSecurityGroupEgressResult$Return": "

Returns true if the request succeeds; otherwise, returns an error.

", "RevokeSecurityGroupIngressRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", + "RevokeSecurityGroupIngressResult$Return": "

Returns true if the request succeeds; otherwise, returns an error.

", "RouteTableAssociation$Main": "

Indicates whether this is the main route table.

", "RunInstancesMonitoringEnabled$Enabled": "

Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.

", "RunInstancesRequest$DisableApiTermination": "

If you set this parameter to true, you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use ModifyInstanceAttribute. Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate, you can terminate the instance by running the shutdown command from the instance.

Default: false

", @@ -8265,7 +8267,9 @@ "AuthorizeSecurityGroupEgressRequest$IpPermissions": "

The sets of IP permissions. You can't specify a destination security group and a CIDR IP address range in the same set of permissions.

", "AuthorizeSecurityGroupIngressRequest$IpPermissions": "

The sets of IP permissions.

", "RevokeSecurityGroupEgressRequest$IpPermissions": "

The sets of IP permissions. You can't specify a destination security group and a CIDR IP address range in the same set of permissions.

", + "RevokeSecurityGroupEgressResult$UnknownIpPermissions": "

The outbound rules that were unknown to the service. In some cases, unknownIpPermissionSet might be in a different format from the request parameter.

", "RevokeSecurityGroupIngressRequest$IpPermissions": "

The sets of IP permissions. You can't specify a source security group and a CIDR IP address range in the same set of permissions.

", + "RevokeSecurityGroupIngressResult$UnknownIpPermissions": "

The inbound rules that were unknown to the service. In some cases, unknownIpPermissionSet might be in a different format from the request parameter.

", "SecurityGroup$IpPermissions": "

The inbound rules associated with the security group.

", "SecurityGroup$IpPermissionsEgress": "

[VPC only] The outbound rules associated with the security group.

", "UpdateSecurityGroupRuleDescriptionsEgressRequest$IpPermissions": "

The IP permissions for the security group rule.

", @@ -11542,11 +11546,21 @@ "refs": { } }, + "RevokeSecurityGroupEgressResult": { + "base": null, + "refs": { + } + }, "RevokeSecurityGroupIngressRequest": { "base": null, "refs": { } }, + "RevokeSecurityGroupIngressResult": { + "base": null, + "refs": { + } + }, "RootDeviceType": { "base": null, "refs": { diff --git a/models/apis/eventbridge/2015-10-07/api-2.json b/models/apis/eventbridge/2015-10-07/api-2.json index 4d8f2b396fe..06281390fc8 100644 --- a/models/apis/eventbridge/2015-10-07/api-2.json +++ b/models/apis/eventbridge/2015-10-07/api-2.json @@ -582,6 +582,12 @@ "Name":{"shape":"EventSourceName"} } }, + "DeadLetterConfig":{ + "type":"structure", + "members":{ + "Arn":{"shape":"ResourceArn"} + } + }, "DeleteEventBusRequest":{ "type":"structure", "required":["Name"], @@ -990,6 +996,16 @@ }, "exception":true }, + "MaximumEventAgeInSeconds":{ + "type":"integer", + "max":86400, + "min":60 + }, + "MaximumRetryAttempts":{ + "type":"integer", + "max":185, + "min":0 + }, "MessageGroupId":{"type":"string"}, "NetworkConfiguration":{ "type":"structure", @@ -1297,12 +1313,24 @@ }, "exception":true }, + "ResourceArn":{ + "type":"string", + "max":1600, + "min":1 + }, "ResourceNotFoundException":{ "type":"structure", "members":{ }, "exception":true }, + "RetryPolicy":{ + "type":"structure", + "members":{ + "MaximumRetryAttempts":{"shape":"MaximumRetryAttempts"}, + "MaximumEventAgeInSeconds":{"shape":"MaximumEventAgeInSeconds"} + } + }, "RoleArn":{ "type":"string", "max":1600, @@ -1488,7 +1516,9 @@ "BatchParameters":{"shape":"BatchParameters"}, "SqsParameters":{"shape":"SqsParameters"}, "HttpParameters":{"shape":"HttpParameters"}, - "RedshiftDataParameters":{"shape":"RedshiftDataParameters"} + "RedshiftDataParameters":{"shape":"RedshiftDataParameters"}, + "DeadLetterConfig":{"shape":"DeadLetterConfig"}, + "RetryPolicy":{"shape":"RetryPolicy"} } }, "TargetArn":{ diff --git a/models/apis/eventbridge/2015-10-07/docs-2.json b/models/apis/eventbridge/2015-10-07/docs-2.json index bf59bd81201..e3e309d7f4b 100644 --- a/models/apis/eventbridge/2015-10-07/docs-2.json +++ b/models/apis/eventbridge/2015-10-07/docs-2.json @@ -150,6 +150,12 @@ "refs": { } }, + "DeadLetterConfig": { + "base": "

A DeadLetterConfig object that contains information about a dead-letter queue configuration.

", + "refs": { + "Target$DeadLetterConfig": "

The DeadLetterConfig that defines the target queue to send dead-letter queue events to.

" + } + }, "DeleteEventBusRequest": { "base": null, "refs": { @@ -537,6 +543,18 @@ "refs": { } }, + "MaximumEventAgeInSeconds": { + "base": null, + "refs": { + "RetryPolicy$MaximumEventAgeInSeconds": "

The maximum amount of time, in seconds, to continue to make retry attempts.

" + } + }, + "MaximumRetryAttempts": { + "base": null, + "refs": { + "RetryPolicy$MaximumRetryAttempts": "

The maximum number of retry attempts to make before the request fails. Retry attempts continue until either the maximum number of attempts is made or the duration specified in MaximumEventAgeInSeconds has elapsed.

" + } + }, "MessageGroupId": { "base": null, "refs": { @@ -801,11 +819,23 @@ "refs": { } }, + "ResourceArn": { + "base": null, + "refs": { + "DeadLetterConfig$Arn": "

The ARN of the SQS queue specified as the target for the dead-letter queue.

" + } + }, "ResourceNotFoundException": { "base": "

An entity that you specified does not exist.

", "refs": { } }, + "RetryPolicy": { + "base": "

A RetryPolicy object that includes information about the retry policy settings.

", + "refs": { + "Target$RetryPolicy": "

The RetryPolicy object that contains the retry policy configuration to use for the dead-letter queue.
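The new DeadLetterConfig and RetryPolicy members are set per target on PutTargets; the same shapes are generated in both the eventbridge and events (CloudWatch Events) packages. A minimal sketch using the eventbridge client, assuming v1.35.6 or later; the rule name, function ARN, and queue ARN are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eventbridge"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := eventbridge.New(sess)

	// Rule name, Lambda ARN, and SQS queue ARN are placeholders.
	out, err := svc.PutTargets(&eventbridge.PutTargetsInput{
		Rule: aws.String("my-rule"),
		Targets: []*eventbridge.Target{{
			Id:  aws.String("my-target"),
			Arn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:my-function"),
			// Events that cannot be delivered are sent to this SQS queue.
			DeadLetterConfig: &eventbridge.DeadLetterConfig{
				Arn: aws.String("arn:aws:sqs:us-east-1:123456789012:my-dlq"),
			},
			// Retry for at most 10 attempts or 1 hour, whichever comes first.
			RetryPolicy: &eventbridge.RetryPolicy{
				MaximumRetryAttempts:     aws.Int64(10),
				MaximumEventAgeInSeconds: aws.Int64(3600),
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	if aws.Int64Value(out.FailedEntryCount) > 0 {
		log.Printf("failed entries: %v", out.FailedEntries)
	}
}
```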

" + } + }, "RoleArn": { "base": null, "refs": { diff --git a/models/apis/events/2015-10-07/api-2.json b/models/apis/events/2015-10-07/api-2.json index 6e55a18c65f..1f2a927ad70 100644 --- a/models/apis/events/2015-10-07/api-2.json +++ b/models/apis/events/2015-10-07/api-2.json @@ -582,6 +582,12 @@ "Name":{"shape":"EventSourceName"} } }, + "DeadLetterConfig":{ + "type":"structure", + "members":{ + "Arn":{"shape":"ResourceArn"} + } + }, "DeleteEventBusRequest":{ "type":"structure", "required":["Name"], @@ -990,6 +996,16 @@ }, "exception":true }, + "MaximumEventAgeInSeconds":{ + "type":"integer", + "max":86400, + "min":60 + }, + "MaximumRetryAttempts":{ + "type":"integer", + "max":185, + "min":0 + }, "MessageGroupId":{"type":"string"}, "NetworkConfiguration":{ "type":"structure", @@ -1297,12 +1313,24 @@ }, "exception":true }, + "ResourceArn":{ + "type":"string", + "max":1600, + "min":1 + }, "ResourceNotFoundException":{ "type":"structure", "members":{ }, "exception":true }, + "RetryPolicy":{ + "type":"structure", + "members":{ + "MaximumRetryAttempts":{"shape":"MaximumRetryAttempts"}, + "MaximumEventAgeInSeconds":{"shape":"MaximumEventAgeInSeconds"} + } + }, "RoleArn":{ "type":"string", "max":1600, @@ -1488,7 +1516,9 @@ "BatchParameters":{"shape":"BatchParameters"}, "SqsParameters":{"shape":"SqsParameters"}, "HttpParameters":{"shape":"HttpParameters"}, - "RedshiftDataParameters":{"shape":"RedshiftDataParameters"} + "RedshiftDataParameters":{"shape":"RedshiftDataParameters"}, + "DeadLetterConfig":{"shape":"DeadLetterConfig"}, + "RetryPolicy":{"shape":"RetryPolicy"} } }, "TargetArn":{ diff --git a/models/apis/events/2015-10-07/docs-2.json b/models/apis/events/2015-10-07/docs-2.json index bf59bd81201..e3e309d7f4b 100644 --- a/models/apis/events/2015-10-07/docs-2.json +++ b/models/apis/events/2015-10-07/docs-2.json @@ -150,6 +150,12 @@ "refs": { } }, + "DeadLetterConfig": { + "base": "

A DeadLetterConfig object that contains information about a dead-letter queue configuration.

", + "refs": { + "Target$DeadLetterConfig": "

The DeadLetterConfig that defines the target queue to send dead-letter queue events to.

" + } + }, "DeleteEventBusRequest": { "base": null, "refs": { @@ -537,6 +543,18 @@ "refs": { } }, + "MaximumEventAgeInSeconds": { + "base": null, + "refs": { + "RetryPolicy$MaximumEventAgeInSeconds": "

The maximum amount of time, in seconds, to continue to make retry attempts.

" + } + }, + "MaximumRetryAttempts": { + "base": null, + "refs": { + "RetryPolicy$MaximumRetryAttempts": "

The maximum number of retry attempts to make before the request fails. Retry attempts continue until either the maximum number of attempts is made or the duration specified in MaximumEventAgeInSeconds has elapsed.

" + } + }, "MessageGroupId": { "base": null, "refs": { @@ -801,11 +819,23 @@ "refs": { } }, + "ResourceArn": { + "base": null, + "refs": { + "DeadLetterConfig$Arn": "

The ARN of the SQS queue specified as the target for the dead-letter queue.

" + } + }, "ResourceNotFoundException": { "base": "

An entity that you specified does not exist.

", "refs": { } }, + "RetryPolicy": { + "base": "

A RetryPolicy object that includes information about the retry policy settings.

", + "refs": { + "Target$RetryPolicy": "

The RetryPolicy object that contains the retry policy configuration to use for the dead-letter queue.

" + } + }, "RoleArn": { "base": null, "refs": { diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 64a2b0804bc..d1e359e49e8 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -7146,7 +7146,8 @@ "EnableCloudwatchLogsExports":{"shape":"LogTypeList"}, "ProcessorFeatures":{"shape":"ProcessorFeatureList"}, "UseDefaultProcessorFeatures":{"shape":"BooleanOptional"}, - "DeletionProtection":{"shape":"BooleanOptional"} + "DeletionProtection":{"shape":"BooleanOptional"}, + "MaxAllocatedStorage":{"shape":"IntegerOptional"} } }, "RestoreDBInstanceFromS3Result":{ @@ -7189,7 +7190,8 @@ "UseDefaultProcessorFeatures":{"shape":"BooleanOptional"}, "DBParameterGroupName":{"shape":"String"}, "DeletionProtection":{"shape":"BooleanOptional"}, - "SourceDbiResourceId":{"shape":"String"} + "SourceDbiResourceId":{"shape":"String"}, + "MaxAllocatedStorage":{"shape":"IntegerOptional"} } }, "RestoreDBInstanceToPointInTimeResult":{ diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 6f3ddc14563..ceb700cc0d7 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -526,7 +526,7 @@ } }, "CloudwatchLogsExportConfiguration": { - "base": "

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs will be exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

", + "base": "

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

The EnableLogTypes and DisableLogTypes arrays determine which logs will be exported (or not exported) to CloudWatch Logs. The values within these arrays depend on the DB engine being used.

For more information about exporting CloudWatch Logs for Amazon RDS DB instances, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

For more information about exporting CloudWatch Logs for Amazon Aurora DB clusters, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

", "refs": { "ModifyDBClusterMessage$CloudwatchLogsExportConfiguration": "

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB cluster.

", "ModifyDBInstanceMessage$CloudwatchLogsExportConfiguration": "

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance.

A change to the CloudwatchLogsExportConfiguration parameter is always applied to the DB instance immediately. Therefore, the ApplyImmediately parameter has no effect.
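For reference, a minimal sketch of how the EnableLogTypes and DisableLogTypes arrays are passed on ModifyDBInstance with this SDK; the instance identifier and log types are placeholders, and the valid log types depend on the DB engine:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := rds.New(sess)

	// The instance identifier and log types are placeholders.
	// This particular change is applied immediately regardless of ApplyImmediately.
	_, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydb"),
		CloudwatchLogsExportConfiguration: &rds.CloudwatchLogsExportConfiguration{
			EnableLogTypes:  []*string{aws.String("error"), aws.String("slowquery")},
			DisableLogTypes: []*string{aws.String("general")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```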

" @@ -2352,8 +2352,10 @@ "RestoreDBInstanceFromS3Message$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to allocate initially for the DB instance. For information about valid Iops values, see Amazon RDS Provisioned IOPS Storage to Improve Performance in the Amazon RDS User Guide.

", "RestoreDBInstanceFromS3Message$MonitoringInterval": "

The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0.

If MonitoringRoleArn is specified, then you must also set MonitoringInterval to a value other than 0.

Valid Values: 0, 1, 5, 10, 15, 30, 60

Default: 0

", "RestoreDBInstanceFromS3Message$PerformanceInsightsRetentionPeriod": "

The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731 (2 years).

", + "RestoreDBInstanceFromS3Message$MaxAllocatedStorage": "

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.

", "RestoreDBInstanceToPointInTimeMessage$Port": "

The port number on which the database accepts connections.

Constraints: Value must be 1150-65535

Default: The same port as the original DB instance.

", "RestoreDBInstanceToPointInTimeMessage$Iops": "

The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance.

Constraints: Must be an integer greater than 1000.

SQL Server

Setting the IOPS value for the SQL Server database engine isn't supported.

", + "RestoreDBInstanceToPointInTimeMessage$MaxAllocatedStorage": "

The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
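The new MaxAllocatedStorage parameter caps storage autoscaling on the restored instance and is accepted by both RestoreDBInstanceFromS3 and RestoreDBInstanceToPointInTime. A minimal sketch of the point-in-time variant, assuming v1.35.6 or later; the instance identifiers and limit are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := rds.New(sess)

	// Instance identifiers are placeholders.
	_, err := svc.RestoreDBInstanceToPointInTime(&rds.RestoreDBInstanceToPointInTimeInput{
		SourceDBInstanceIdentifier: aws.String("mydb"),
		TargetDBInstanceIdentifier: aws.String("mydb-restored"),
		UseLatestRestorableTime:    aws.Bool(true),
		// New in this release: upper limit for storage autoscaling, in gigabytes
		// per the changelog entry.
		MaxAllocatedStorage: aws.Int64(1000),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```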

", "ScalingConfiguration$MinCapacity": "

The minimum capacity for an Aurora DB cluster in serverless DB engine mode.

For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384.

The minimum capacity must be less than or equal to the maximum capacity.

", "ScalingConfiguration$MaxCapacity": "

The maximum capacity for an Aurora DB cluster in serverless DB engine mode.

For Aurora MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.

For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384.

The maximum capacity must be greater than or equal to the minimum capacity.

", "ScalingConfiguration$SecondsUntilAutoPause": "

The time, in seconds, before an Aurora DB cluster in serverless mode is paused.

", @@ -2499,7 +2501,7 @@ "CloudwatchLogsExportConfiguration$EnableLogTypes": "

The list of log types to enable.

", "CloudwatchLogsExportConfiguration$DisableLogTypes": "

The list of log types to disable.

", "CreateDBClusterMessage$EnableCloudwatchLogsExports": "

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

Aurora MySQL

Possible values are audit, error, general, and slowquery.

Aurora PostgreSQL

Possible values are postgresql and upgrade.

", - "CreateDBInstanceMessage$EnableCloudwatchLogsExports": "

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

MariaDB

Possible values are audit, error, general, and slowquery.

Microsoft SQL Server

Possible values are agent and error.

MySQL

Possible values are audit, error, general, and slowquery.

Oracle

Possible values are alert, audit, listener, and trace.

PostgreSQL

Possible values are postgresql and upgrade.

", + "CreateDBInstanceMessage$EnableCloudwatchLogsExports": "

The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Relational Database Service User Guide.

Amazon Aurora

Not applicable. CloudWatch Logs exports are managed by the DB cluster.

MariaDB

Possible values are audit, error, general, and slowquery.

Microsoft SQL Server

Possible values are agent and error.

MySQL

Possible values are audit, error, general, and slowquery.

Oracle

Possible values are alert, audit, listener, and trace.

PostgreSQL

Possible values are postgresql and upgrade.

", "CreateDBInstanceReadReplicaMessage$EnableCloudwatchLogsExports": "

The list of logs that the new DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

", "DBCluster$EnabledCloudwatchLogsExports": "

A list of log types that this DB cluster is configured to export to CloudWatch Logs.

Log types vary by DB engine. For information about the log types for each DB engine, see Amazon RDS Database Log Files in the Amazon Aurora User Guide.

", "DBEngineVersion$ExportableLogTypes": "

The types of logs that the database engine has available for export to CloudWatch Logs.

", @@ -2509,7 +2511,7 @@ "RestoreDBClusterFromS3Message$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

", "RestoreDBClusterFromSnapshotMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to Amazon CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

", "RestoreDBClusterToPointInTimeMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB cluster is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

", - "RestoreDBInstanceFromDBSnapshotMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon Aurora User Guide.

", + "RestoreDBInstanceFromDBSnapshotMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

", "RestoreDBInstanceFromS3Message$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

", "RestoreDBInstanceToPointInTimeMessage$EnableCloudwatchLogsExports": "

The list of logs that the restored DB instance is to export to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see Publishing Database Logs to Amazon CloudWatch Logs in the Amazon RDS User Guide.

" } @@ -3452,7 +3454,7 @@ "CopyDBSnapshotMessage$KmsKeyId": "

The AWS KMS key ID for an encrypted DB snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

If you copy an encrypted DB snapshot from your AWS account, you can specify a value for this parameter to encrypt the copy with a new KMS encryption key. If you don't specify a value for this parameter, then the copy of the DB snapshot is encrypted with the same KMS key as the source DB snapshot.

If you copy an encrypted DB snapshot that is shared from another AWS account, then you must specify a value for this parameter.

If you specify this parameter when you copy an unencrypted snapshot, the copy is encrypted.

If you copy an encrypted snapshot to a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.

", "CopyDBSnapshotMessage$PreSignedUrl": "

The URL that contains a Signature Version 4 signed request for the CopyDBSnapshot API action in the source AWS Region that contains the source DB snapshot to copy.

You must specify this parameter when you copy an encrypted DB snapshot from another AWS Region by using the Amazon RDS API. Don't specify PreSignedUrl when you are copying an encrypted DB snapshot in the same AWS Region.

The presigned URL must be a valid request for the CopyDBSnapshot API action that can be executed in the source AWS Region that contains the encrypted DB snapshot to be copied. The presigned URL request must contain the following parameter values:

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (AWS Signature Version 4) and Signature Version 4 Signing Process.

If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a pre-signed URL that is a valid request for the operation that can be executed in the source AWS Region.

", "CopyDBSnapshotMessage$OptionGroupName": "

The name of an option group to associate with the copy of the snapshot.

Specify this option if you are copying a snapshot from one AWS Region to another, and your DB instance uses a nondefault option group. If your source DB instance uses Transparent Data Encryption for Oracle or Microsoft SQL Server, you must specify this option when copying across AWS Regions. For more information, see Option Group Considerations in the Amazon RDS User Guide.

", - "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

The identifier or ARN for the source option group. For information about creating an ARN, see Constructing an ARN for Amazon RDS in the Amazon RDS User Guide.

Constraints:

", + "CopyOptionGroupMessage$SourceOptionGroupIdentifier": "

The identifier for the source option group.

Constraints:

", "CopyOptionGroupMessage$TargetOptionGroupIdentifier": "

The identifier for the copied option group.

Constraints:

Example: my-option-group

", "CopyOptionGroupMessage$TargetOptionGroupDescription": "

The description for the copied option group.

", "CreateCustomAvailabilityZoneMessage$CustomAvailabilityZoneName": "

The name of the custom Availability Zone (AZ).

", @@ -3497,7 +3499,7 @@ "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

", "CreateDBInstanceMessage$DBParameterGroupName": "

The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used.

Constraints:

", "CreateDBInstanceMessage$PreferredBackupWindow": "

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter. For more information, see The Backup Window in the Amazon RDS User Guide.

Amazon Aurora

Not applicable. The daily time range for creating automated backups is managed by the DB cluster.

The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see Adjusting the Preferred DB Instance Maintenance Window in the Amazon RDS User Guide.

Constraints:

", - "CreateDBInstanceMessage$EngineVersion": "

The version number of the database engine to use.

For a list of valid engine versions, use the DescribeDBEngineVersions action.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.

MariaDB

See MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

Microsoft SQL Server

See Version and Feature Support on Amazon RDS in the Amazon RDS User Guide.

MySQL

See MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

Oracle

See Oracle Database Engine Release Notes in the Amazon RDS User Guide.

PostgreSQL

See Supported PostgreSQL Database Versions in the Amazon RDS User Guide.

", + "CreateDBInstanceMessage$EngineVersion": "

The version number of the database engine to use.

For a list of valid engine versions, use the DescribeDBEngineVersions action.

The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.

Amazon Aurora

Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.

MariaDB

See MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.

Microsoft SQL Server

See Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.

MySQL

See MySQL on Amazon RDS Versions in the Amazon RDS User Guide.

Oracle

See Oracle Database Engine Release Notes in the Amazon RDS User Guide.

PostgreSQL

See Supported PostgreSQL Database Versions in the Amazon RDS User Guide.

", "CreateDBInstanceMessage$LicenseModel": "

License model information for this DB instance.

Valid values: license-included | bring-your-own-license | general-public-license

", "CreateDBInstanceMessage$OptionGroupName": "

Indicates that the DB instance should be associated with the specified option group.

Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance once it is associated with a DB instance

", "CreateDBInstanceMessage$CharacterSetName": "

For supported engines, indicates that the DB instance should be associated with the specified CharacterSet.

Amazon Aurora

Not applicable. The character set is managed by the DB cluster. For more information, see CreateDBCluster.

", @@ -3927,7 +3929,7 @@ "IPRange$CIDRIP": "

Specifies the IP range.

", "ImportInstallationMediaMessage$CustomAvailabilityZoneId": "

The identifier of the custom Availability Zone (AZ) to import the installation media to.

", "ImportInstallationMediaMessage$Engine": "

The name of the database engine to be used for this instance.

The list only includes supported DB engines that require an on-premises customer provided license.

Valid Values:

", - "ImportInstallationMediaMessage$EngineVersion": "

The version number of the database engine to use.

For a list of valid engine versions, call DescribeDBEngineVersions.

The following are the database engines and links to information about the major and minor versions. The list only includes DB engines that require an on-premises customer provided license.

Microsoft SQL Server

See Version and Feature Support on Amazon RDS in the Amazon RDS User Guide.

", + "ImportInstallationMediaMessage$EngineVersion": "

The version number of the database engine to use.

For a list of valid engine versions, call DescribeDBEngineVersions.

The following are the database engines and links to information about the major and minor versions. The list only includes DB engines that require an on-premises customer provided license.

Microsoft SQL Server

See Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.

", "ImportInstallationMediaMessage$EngineInstallationMediaPath": "

The path to the installation medium for the specified DB engine.

Example: SQLServerISO/en_sql_server_2016_enterprise_x64_dvd_8701793.iso

", "ImportInstallationMediaMessage$OSInstallationMediaPath": "

The path to the installation medium for the operating system associated with the specified DB engine.

Example: WindowsISO/en_windows_server_2016_x64_dvd_9327751.iso

", "InstallationMedia$InstallationMediaId": "

The installation medium ID.

", diff --git a/models/apis/rekognition/2016-06-27/api-2.json b/models/apis/rekognition/2016-06-27/api-2.json index 6f5a2e432a3..7e21730825e 100644 --- a/models/apis/rekognition/2016-06-27/api-2.json +++ b/models/apis/rekognition/2016-06-27/api-2.json @@ -2337,7 +2337,8 @@ "OutputConfig":{"shape":"OutputConfig"}, "TrainingDataResult":{"shape":"TrainingDataResult"}, "TestingDataResult":{"shape":"TestingDataResult"}, - "EvaluationResult":{"shape":"EvaluationResult"} + "EvaluationResult":{"shape":"EvaluationResult"}, + "ManifestSummary":{"shape":"GroundTruthManifest"} } }, "ProjectVersionDescriptions":{ @@ -2907,7 +2908,8 @@ "type":"structure", "members":{ "Input":{"shape":"TestingData"}, - "Output":{"shape":"TestingData"} + "Output":{"shape":"TestingData"}, + "Validation":{"shape":"ValidationData"} } }, "TextDetection":{ @@ -2962,7 +2964,8 @@ "type":"structure", "members":{ "Input":{"shape":"TrainingData"}, - "Output":{"shape":"TrainingData"} + "Output":{"shape":"TrainingData"}, + "Validation":{"shape":"ValidationData"} } }, "UInteger":{ @@ -2989,6 +2992,12 @@ "type":"list", "member":{"shape":"Url"} }, + "ValidationData":{ + "type":"structure", + "members":{ + "Assets":{"shape":"Assets"} + } + }, "VersionName":{ "type":"string", "max":255, diff --git a/models/apis/rekognition/2016-06-27/docs-2.json b/models/apis/rekognition/2016-06-27/docs-2.json index 1a38193f118..fa76512cd98 100644 --- a/models/apis/rekognition/2016-06-27/docs-2.json +++ b/models/apis/rekognition/2016-06-27/docs-2.json @@ -2,7 +2,7 @@ "version": "2.0", "service": "

This is the Amazon Rekognition API reference.

", "operations": { - "CompareFaces": "

Compares a face in the source input image with each of the 100 largest faces detected in the target input image.

If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image.

You pass the input and target images either as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, role, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.

By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter.

CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. The response also returns information about the face in the source image, including the bounding box of the face and confidence value.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE.

To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.

If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.

This is a stateless API operation. That is, data returned by this operation doesn't persist.

For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:CompareFaces action.

", + "CompareFaces": "

Compares a face in the source input image with each of the 100 largest faces detected in the target input image.

If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image.

You pass the input and target images either as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match.

By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter.

CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. The response also returns information about the face in the source image, including the bounding box of the face and confidence value.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE.

If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation.

If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error.

This is a stateless API operation. That is, data returned by this operation doesn't persist.

For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:CompareFaces action.
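For reference, a minimal sketch of calling CompareFaces with S3-hosted images using this SDK; the bucket and object keys are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := rekognition.New(sess)

	// Bucket and object keys are placeholders.
	out, err := svc.CompareFaces(&rekognition.CompareFacesInput{
		SourceImage: &rekognition.Image{S3Object: &rekognition.S3Object{
			Bucket: aws.String("my-bucket"), Name: aws.String("source.jpg"),
		}},
		TargetImage: &rekognition.Image{S3Object: &rekognition.S3Object{
			Bucket: aws.String("my-bucket"), Name: aws.String("target.jpg"),
		}},
		// Only matches at or above this similarity are returned (default 80).
		SimilarityThreshold: aws.Float64(80),
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, m := range out.FaceMatches {
		fmt.Printf("match with similarity %.1f\n", aws.Float64Value(m.Similarity))
	}
	fmt.Println("unmatched faces:", len(out.UnmatchedFaces))
}
```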

", "CreateCollection": "

Creates a collection in an AWS Region. You can add faces to the collection using the IndexFaces operation.

For example, you might create collections, one for each of your application users. A user can then index faces using the IndexFaces operation and persist results in a specific collection. Then, a user can search the collection for faces in the user-specific container.

When you create a collection, it is associated with the latest version of the face model version.

Collection names are case-sensitive.

This operation requires permissions to perform the rekognition:CreateCollection action.

", "CreateProject": "

Creates a new Amazon Rekognition Custom Labels project. A project is a logical grouping of resources (images, Labels, models) and operations (training, evaluation and detection).

This operation requires permissions to perform the rekognition:CreateProject action.

", "CreateProjectVersion": "

Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. You can specify one training dataset and one testing dataset. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model.

Training takes a while to complete. You can get the current status by calling DescribeProjectVersions.

Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model.

After evaluating the model, you start the model by calling StartProjectVersion.

This operation requires permissions to perform the rekognition:CreateProjectVersion action.

", @@ -34,7 +34,7 @@ "ListCollections": "

Returns list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs.

For an example, see Listing Collections in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:ListCollections action.

", "ListFaces": "

Returns metadata for faces in the specified collection. This metadata includes information such as the bounding box coordinates, the confidence (that the bounding box contains a face), and face ID. For an example, see Listing Faces in a Collection in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:ListFaces action.

", "ListStreamProcessors": "

Gets a list of stream processors that you have created with CreateStreamProcessor.

", - "RecognizeCelebrities": "

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 100 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 100 faces in the image.

For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the GetCelebrityInfo operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.

", + "RecognizeCelebrities": "

Returns an array of celebrities recognized in the input image. For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.

RecognizeCelebrities returns the 64 largest faces in the image. It lists recognized celebrities in the CelebrityFaces array and unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 64 faces in the image.

For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image.

Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the GetCelebrityInfo operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.
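For reference, a minimal sketch of calling RecognizeCelebrities with an S3-hosted image using this SDK; the bucket and object key are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := rekognition.New(sess)

	// Bucket and object key are placeholders.
	out, err := svc.RecognizeCelebrities(&rekognition.RecognizeCelebritiesInput{
		Image: &rekognition.Image{
			S3Object: &rekognition.S3Object{
				Bucket: aws.String("my-bucket"),
				Name:   aws.String("photos/group.jpg"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Only the largest faces (up to 64, per the updated documentation) are considered.
	for _, c := range out.CelebrityFaces {
		fmt.Printf("%s (confidence %.1f)\n", aws.StringValue(c.Name), aws.Float64Value(c.MatchConfidence))
	}
	fmt.Println("unrecognized faces:", len(out.UnrecognizedFaces))
}
```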

", "SearchFaces": "

For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection.

You can also search faces without indexing faces by using the SearchFacesByImage operation.

The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face.

For an example, see Searching for a Face Using Its Face ID in the Amazon Rekognition Developer Guide.

This operation requires permissions to perform the rekognition:SearchFaces action.

", "SearchFacesByImage": "

For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection.

To search for all faces in an input image, you might first call the IndexFaces operation, and then use the face IDs returned in subsequent calls to the SearchFaces operation.

You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which you can then pass to the SearchFacesByImage operation.

You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity score indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image.

For an example, see Searching for a Face Using an Image in the Amazon Rekognition Developer Guide.

The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar for filtering by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE.

To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection.

This operation requires permissions to perform the rekognition:SearchFacesByImage action.
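The sketch below shows one way to call SearchFacesByImage from the Go SDK with a quality filter applied; the collection, bucket, and object names are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	svc := rekognition.New(session.Must(session.NewSession()))

	out, err := svc.SearchFacesByImage(&rekognition.SearchFacesByImageInput{
		CollectionId: aws.String("my-collection"), // placeholder collection
		Image: &rekognition.Image{
			S3Object: &rekognition.S3Object{
				Bucket: aws.String("my-bucket"), // placeholder
				Name:   aws.String("query.jpg"), // placeholder
			},
		},
		FaceMatchThreshold: aws.Float64(95),
		QualityFilter:      aws.String("HIGH"), // filter out detected faces below the HIGH quality bar
	})
	if err != nil {
		fmt.Println("SearchFacesByImage failed:", err)
		return
	}

	// Matches are ordered with the highest similarity first.
	for _, m := range out.FaceMatches {
		fmt.Printf("face %s, similarity %.1f\n",
			aws.StringValue(m.Face.FaceId), aws.Float64Value(m.Similarity))
	}
}
```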

", "StartCelebrityRecognition": "

Starts asynchronous recognition of celebrities in a stored video.

Amazon Rekognition Video can detect celebrities in a video that is stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition.

For more information, see Recognizing Celebrities in the Amazon Rekognition Developer Guide.
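A hedged Go sketch of starting the asynchronous job is shown below; the bucket name, SNS topic ARN, and IAM role ARN are placeholders that you would replace with your own resources.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	svc := rekognition.New(session.Must(session.NewSession()))

	start, err := svc.StartCelebrityRecognition(&rekognition.StartCelebrityRecognitionInput{
		Video: &rekognition.Video{
			S3Object: &rekognition.S3Object{
				Bucket: aws.String("my-video-bucket"), // placeholder bucket
				Name:   aws.String("clip.mp4"),        // placeholder video file
			},
		},
		NotificationChannel: &rekognition.NotificationChannel{
			SNSTopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:AmazonRekognitionTopic"), // placeholder topic
			RoleArn:     aws.String("arn:aws:iam::111122223333:role/RekognitionSNSRole"),         // placeholder role
		},
	})
	if err != nil {
		fmt.Println("StartCelebrityRecognition failed:", err)
		return
	}

	// After the SNS topic reports SUCCEEDED, pass this JobId to GetCelebrityRecognition.
	fmt.Println("JobId:", aws.StringValue(start.JobId))
}
```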

", @@ -63,7 +63,7 @@ } }, "Asset": { - "base": "

Assets are the images that you use to train and evaluate a model version. Assets are referenced by Sagemaker GroundTruth manifest files.

", + "base": "

Assets are the images that you use to train and evaluate a model version. Assets can also contain validation information that you use to debug a failed model training.

", "refs": { "Assets$member": null } @@ -72,7 +72,8 @@ "base": null, "refs": { "TestingData$Assets": "

The assets used for testing.

", - "TrainingData$Assets": "

A Sagemaker GroundTruth manifest file that contains the training images (assets).

" + "TrainingData$Assets": "

A Sagemaker GroundTruth manifest file that contains the training images (assets).

", + "ValidationData$Assets": "

The assets that comprise the validation data.

" } }, "Attribute": { @@ -161,7 +162,7 @@ "CelebrityList": { "base": null, "refs": { - "RecognizeCelebritiesResponse$CelebrityFaces": "

Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 15 celebrities in an image.

" + "RecognizeCelebritiesResponse$CelebrityFaces": "

Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64 celebrities in an image.

" } }, "CelebrityRecognition": { @@ -691,8 +692,8 @@ "EvaluationResult$F1Score": "

The F1 score for the evaluation of all labels. The F1 score metric evaluates the overall precision and recall performance of the model as a single value. A higher value indicates better precision and recall performance. A lower score indicates that precision, recall, or both are performing poorly.

", "ImageQuality$Brightness": "

Value representing brightness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a brighter face image.

", "ImageQuality$Sharpness": "

Value representing sharpness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a sharper face image.

", - "Landmark$X": "

The x-coordinate from the top left of the landmark expressed as the ratio of the width of the image. For example, if the image is 700 x 200 and the x-coordinate of the landmark is at 350 pixels, this value is 0.5.

", - "Landmark$Y": "

The y-coordinate from the top left of the landmark expressed as the ratio of the height of the image. For example, if the image is 700 x 200 and the y-coordinate of the landmark is at 100 pixels, this value is 0.5.

", + "Landmark$X": "

The x-coordinate of the landmark expressed as a ratio of the width of the image. The x-coordinate is measured from the left-side of the image. For example, if the image is 700 pixels wide and the x-coordinate of the landmark is at 350 pixels, this value is 0.5.

", + "Landmark$Y": "

The y-coordinate of the landmark expressed as a ratio of the height of the image. The y-coordinate is measured from the top of the image. For example, if the image height is 200 pixels and the y-coordinate of the landmark is at 50 pixels, this value is 0.25.
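Because both coordinates are ratios of the image dimensions, converting them back to pixel positions is a single multiplication, as in this small illustrative helper (the image dimensions are assumed):

```go
package main

import "fmt"

// toPixels converts a landmark's ratio coordinates (X, Y) into pixel
// coordinates for an image of the given width and height.
func toPixels(x, y float64, widthPx, heightPx int) (px, py int) {
	return int(x * float64(widthPx)), int(y * float64(heightPx))
}

func main() {
	// X = 0.5 on a 700-pixel-wide image -> pixel 350;
	// Y = 0.25 on a 200-pixel-tall image -> pixel 50.
	px, py := toPixels(0.5, 0.25, 700, 200)
	fmt.Println(px, py) // 350 50
}
```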

", "Point$X": "

The value of the X coordinate for a point on a Polygon.

", "Point$Y": "

The value of the Y coordinate for a point on a Polygon.

", "VideoMetadata$FrameRate": "

Number of frames per second in the video.

" @@ -814,9 +815,10 @@ } }, "GroundTruthManifest": { - "base": "

The S3 bucket that contains the Ground Truth manifest file.

", + "base": "

The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest file.

", "refs": { - "Asset$GroundTruthManifest": null + "Asset$GroundTruthManifest": null, + "ProjectVersionDescription$ManifestSummary": "

The location of the summary manifest. The summary manifest provides aggregate data validation results for the training and test datasets.

" } }, "HumanLoopActivationConditionsEvaluationResults": { @@ -1583,7 +1585,7 @@ "SegmentDetections": { "base": null, "refs": { - "GetSegmentDetectionResponse$Segments": "

An array of segments detected in a video.

" + "GetSegmentDetectionResponse$Segments": "

An array of segments detected in a video. The array is sorted by the segment types (TECHNICAL_CUE or SHOT) specified in the SegmentTypes input parameter of StartSegmentDetection. Within each segment type the array is sorted by timestamp values.
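A short Go sketch of reading the sorted array is shown below; the job ID is a placeholder for the value returned by StartSegmentDetection, and pagination is omitted.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	svc := rekognition.New(session.Must(session.NewSession()))

	out, err := svc.GetSegmentDetection(&rekognition.GetSegmentDetectionInput{
		JobId: aws.String("example-job-id"), // placeholder JobId from StartSegmentDetection
	})
	if err != nil {
		fmt.Println("GetSegmentDetection failed:", err)
		return
	}

	// Segments arrive grouped by segment type, then ordered by timestamp.
	for _, seg := range out.Segments {
		fmt.Printf("%s %d-%d ms\n",
			aws.StringValue(seg.Type),
			aws.Int64Value(seg.StartTimestampMillis),
			aws.Int64Value(seg.EndTimestampMillis))
	}
}
```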

" } }, "SegmentType": { @@ -1912,9 +1914,9 @@ } }, "TestingDataResult": { - "base": "

A Sagemaker Groundtruth format manifest file representing the dataset used for testing.

", + "base": "

Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during testing.

", "refs": { - "ProjectVersionDescription$TestingDataResult": "

The manifest file that represents the testing results.

" + "ProjectVersionDescription$TestingDataResult": "

Contains information about the testing results.

" } }, "TextDetection": { @@ -1970,8 +1972,8 @@ "LabelDetection$Timestamp": "

Time, in milliseconds from the start of the video, that the label was detected.

", "PersonDetection$Timestamp": "

The time, in milliseconds from the start of the video, that the person's path was tracked.

", "PersonMatch$Timestamp": "

The time, in milliseconds from the beginning of the video, that the person was matched in the video.

", - "SegmentDetection$StartTimestampMillis": "

The start time of the detected segment in milliseconds from the start of the video.

", - "SegmentDetection$EndTimestampMillis": "

The end time of the detected segment, in milliseconds, from the start of the video.

", + "SegmentDetection$StartTimestampMillis": "

The start time of the detected segment in milliseconds from the start of the video. This value is rounded down. For example, if the actual timestamp is 100.6667 milliseconds, Amazon Rekognition Video returns a value of 100 milliseconds.

", + "SegmentDetection$EndTimestampMillis": "

The end time of the detected segment, in milliseconds, from the start of the video. This value is rounded down.

", "TextDetectionResult$Timestamp": "

The time, in milliseconds from the start of the video, that the text was detected.

" } }, @@ -1984,9 +1986,9 @@ } }, "TrainingDataResult": { - "base": "

A Sagemaker Groundtruth format manifest file that represents the dataset used for training.

", + "base": "

Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during training.

", "refs": { - "ProjectVersionDescription$TrainingDataResult": "

The manifest file that represents the training results.

" + "ProjectVersionDescription$TrainingDataResult": "

Contains information about the training results.

" } }, "UInteger": { @@ -2007,11 +2009,11 @@ "refs": { "AudioMetadata$DurationMillis": "

The duration of the audio stream in milliseconds.

", "AudioMetadata$SampleRate": "

The sample rate for the audio stream.

", - "AudioMetadata$NumberOfChannels": "

The number of audio channels in the segement.

", + "AudioMetadata$NumberOfChannels": "

The number of audio channels in the segment.

", "DescribeCollectionResponse$FaceCount": "

The number of faces that are indexed into the collection. To index faces into a collection, use IndexFaces.

", "ProjectVersionDescription$BillableTrainingTimeInSeconds": "

The duration, in seconds, that the model version has been billed for training. This value is only returned if the model version has been successfully trained.

", "SegmentDetection$DurationMillis": "

The duration of the detected segment in milliseconds.

", - "ShotSegment$Index": "

An Identifier for a shot detection segment detected in a video

", + "ShotSegment$Index": "

An Identifier for a shot detection segment detected in a video.

", "VideoMetadata$DurationMillis": "

Length of the video in milliseconds.

", "VideoMetadata$FrameHeight": "

Vertical pixel dimension of the video.

", "VideoMetadata$FrameWidth": "

Horizontal pixel dimension of the video.

" @@ -2043,6 +2045,13 @@ "GetCelebrityInfoResponse$Urls": "

An array of URLs pointing to additional celebrity information.

" } }, + "ValidationData": { + "base": "

Contains the Amazon S3 bucket location of the validation data for a model training job.

The validation data includes error information for individual JSON lines in the dataset. For more information, see Debugging a Failed Model Training in the Amazon Rekognition Custom Labels Developer Guide.

You get the ValidationData object for the training dataset (TrainingDataResult) and the test dataset (TestingDataResult) by calling DescribeProjectVersions.

The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.
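The Go sketch below shows one way to reach that manifest location through DescribeProjectVersions; the project ARN is a placeholder and nil checks are kept to a minimum.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	svc := rekognition.New(session.Must(session.NewSession()))

	out, err := svc.DescribeProjectVersions(&rekognition.DescribeProjectVersionsInput{
		ProjectArn: aws.String("arn:aws:rekognition:us-east-1:111122223333:project/my-project/1234567890123"), // placeholder
	})
	if err != nil {
		fmt.Println("DescribeProjectVersions failed:", err)
		return
	}

	for _, v := range out.ProjectVersionDescriptions {
		tdr := v.TrainingDataResult
		if tdr == nil || tdr.Validation == nil || len(tdr.Validation.Assets) == 0 {
			continue
		}
		// The Assets slice of ValidationData holds a single Asset whose
		// GroundTruthManifest gives the S3 location of the validation results.
		s3obj := tdr.Validation.Assets[0].GroundTruthManifest.S3Object
		fmt.Printf("training validation manifest: s3://%s/%s\n",
			aws.StringValue(s3obj.Bucket), aws.StringValue(s3obj.Name))
	}
}
```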

", + "refs": { + "TestingDataResult$Validation": "

The location of the data validation manifest. The data validation manifest is created for the test dataset during model training.

", + "TrainingDataResult$Validation": "

The location of the data validation manifest. The data validation manifest is created for the training dataset during model training.

" + } + }, "VersionName": { "base": null, "refs": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index fb99f1aa8db..a2b9aa7064a 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -8408,7 +8408,8 @@ "sitara_am57x", "amba_cv22", "x86_win32", - "x86_win64" + "x86_win64", + "coreml" ] }, "TargetObjectiveMetricValue":{"type":"float"}, diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 755c39b98f3..bb1825d7fce 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -9,7 +9,7 @@ "CreateAutoMLJob": "

Creates an Autopilot job.

Find the best performing model after you run an Autopilot job by calling . Deploy that model by following the steps described in Step 6.1: Deploy the Model to Amazon SageMaker Hosting Services.

For information about how to use Autopilot, see Automate Model Development with Amazon SageMaker Autopilot.

", "CreateCodeRepository": "

Creates a Git repository as a resource in your Amazon SageMaker account. You can associate the repository with notebook instances so that you can use Git source control for the notebooks you create. The Git repository is a resource in your Amazon SageMaker account, so it can be associated with more than one notebook instance, and it persists independently from the lifecycle of any notebook instances it is associated with.

The repository can be hosted either in AWS CodeCommit or in any other Git repository.

", "CreateCompilationJob": "

Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.

If you choose to host your model using Amazon SageMaker hosting services, you can use the resulting model artifacts as part of the model. You can also use the artifacts with AWS IoT Greengrass. In that case, deploy them as an ML resource.

In the request body, you provide the following:

You can also provide a Tag to track the model compilation job's resource use and costs. The response body contains the CompilationJobArn for the compiled job.

To stop a model compilation job, use StopCompilationJob. To get information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model compilation jobs, use ListCompilationJobs.

", - "CreateDomain": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the VPC mode that's chosen when you onboard to Studio. The following options are available:

VpcOnly mode

When you specify VpcOnly, you must specify the following:

For more information, see:

", + "CreateDomain": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. An AWS account is limited to one domain per region. Users within a domain can share notebook files and other artifacts with each other.

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

VPC configuration

All SageMaker Studio traffic between the domain and the EFS volume is through the specified VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to Studio. The following options are available:

VpcOnly network access type

When you choose VpcOnly, you must specify the following:

For more information, see:

", "CreateEndpoint": "

Creates an endpoint using the endpoint configuration specified in the request. Amazon SageMaker uses the endpoint to provision resources and deploy models. You create the endpoint configuration with the CreateEndpointConfig API.

Use this API to deploy models using Amazon SageMaker hosting services.

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

You must not delete an EndpointConfig that is in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

The endpoint name must be unique within an AWS Region in your AWS account.

When it receives the request, Amazon SageMaker creates the endpoint, launches the resources (ML compute instances), and deploys the model(s) on them.

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

When Amazon SageMaker receives the request, it sets the endpoint status to Creating. After it creates the endpoint, it sets the status to InService. Amazon SageMaker can then process incoming requests for inferences. To check the status of an endpoint, use the DescribeEndpoint API.

If any of the models hosted at this endpoint get model data from an Amazon S3 location, Amazon SageMaker uses AWS Security Token Service to download model artifacts from the S3 path you provided. AWS STS is activated in your IAM user account by default. If you previously deactivated AWS STS for a region, you need to reactivate AWS STS for that region. For more information, see Activating and Deactivating AWS STS in an AWS Region in the AWS Identity and Access Management User Guide.

", "CreateEndpointConfig": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In the configuration, you identify one or more models, created using the CreateModel API, to deploy and the resources that you want Amazon SageMaker to provision. Then you call the CreateEndpoint API.

Use this API if you want to use Amazon SageMaker hosting services to deploy models into production.

In the request, you define a ProductionVariant for each model that you want to deploy. Each ProductionVariant parameter also describes the resources that you want Amazon SageMaker to provision. This includes the number and type of ML compute instances to deploy.

If you are hosting multiple models, you also assign a VariantWeight to specify how much traffic you want to allocate to each model. For example, suppose that you want to host two models, A and B, and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to model B.
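As an illustration of that weighting, the hedged Go sketch below defines two variants with weights 2 and 1; the config name, model names, and instance type are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	_, err := svc.CreateEndpointConfig(&sagemaker.CreateEndpointConfigInput{
		EndpointConfigName: aws.String("my-endpoint-config"), // placeholder
		ProductionVariants: []*sagemaker.ProductionVariant{
			{
				VariantName:          aws.String("model-a"),
				ModelName:            aws.String("model-a"), // placeholder model created with CreateModel
				InstanceType:         aws.String("ml.m5.large"),
				InitialInstanceCount: aws.Int64(1),
				InitialVariantWeight: aws.Float64(2), // receives two-thirds of the traffic
			},
			{
				VariantName:          aws.String("model-b"),
				ModelName:            aws.String("model-b"), // placeholder
				InstanceType:         aws.String("ml.m5.large"),
				InitialInstanceCount: aws.Int64(1),
				InitialVariantWeight: aws.Float64(1), // receives one-third of the traffic
			},
		},
	})
	if err != nil {
		fmt.Println("CreateEndpointConfig failed:", err)
	}
}
```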

For an example that calls this method when deploying a model to Amazon SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (AWS SDK for Python (Boto 3)).

When you call CreateEndpoint, a load call is made to DynamoDB to verify that your endpoint configuration exists. When you read data from a DynamoDB table supporting Eventually Consistent Reads , the response might not reflect the results of a recently completed write operation. The response might include some stale data. If the dependent entities are not yet in DynamoDB, this causes a validation error. If you repeat your read request after a short time, the response should return the latest data. So retry logic is recommended to handle these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

", "CreateExperiment": "

Creates a SageMaker experiment. An experiment is a collection of trials that are observed, compared and evaluated as a group. A trial is a set of steps, called trial components, that produce a machine learning model.

The goal of an experiment is to determine the components that produce the best model. Multiple trials are performed, each one isolating and measuring the impact of a change to one or more inputs, while keeping the remaining inputs constant.

When you use Amazon SageMaker Studio or the Amazon SageMaker Python SDK, all experiments, trials, and trial components are automatically tracked, logged, and indexed. When you use the AWS SDK for Python (Boto), you must use the logging APIs provided by the SDK.

You can add tags to experiments, trials, trial components and then use the Search API to search for the tags.

To add a description to an experiment, specify the optional Description parameter. To add a description later, or to change the description, call the UpdateExperiment API.

To get a list of all your experiments, call the ListExperiments API. To view an experiment's properties, call the DescribeExperiment API. To get a list of all the trials associated with an experiment, call the ListTrials API. To create a trial call the CreateTrial API.

", @@ -959,7 +959,7 @@ "CompilerOptions": { "base": null, "refs": { - "OutputConfig$CompilerOptions": "

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compliations. For any other cases, it is optional to specify CompilerOptions.

" + "OutputConfig$CompilerOptions": "

Specifies additional parameters for compiler options in JSON format. The compiler options are TargetPlatform specific. It is required for NVIDIA accelerators and highly recommended for CPU compilations. For any other cases, it is optional to specify CompilerOptions.

" } }, "CompressionType": { @@ -1457,7 +1457,7 @@ "DataInputConfig": { "base": null, "refs": { - "InputConfig$DataInputConfig": "

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

" + "InputConfig$DataInputConfig": "

Specifies the name and shape of the expected data inputs for your trained model with a JSON dictionary form. The data inputs are InputConfig$Framework specific.

DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice (ML Model format):

CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models. CoreML conversion examples:
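As a hedged illustration of compiling a PyTorch model to the new coreml target, the Go sketch below uses placeholder ARNs, S3 paths, and an input shape; the exact CoreML-specific DataInputConfig keys are documented separately and are not reproduced here.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession()))

	_, err := svc.CreateCompilationJob(&sagemaker.CreateCompilationJobInput{
		CompilationJobName: aws.String("pytorch-to-coreml"),                                     // placeholder
		RoleArn:            aws.String("arn:aws:iam::111122223333:role/SageMakerCompileRole"), // placeholder
		InputConfig: &sagemaker.InputConfig{
			S3Uri:     aws.String("s3://my-bucket/model.tar.gz"), // placeholder trained-model artifact
			Framework: aws.String("PYTORCH"),
			// Illustrative input shape only; see the CoreML-specific
			// DataInputConfig parameters in the SageMaker documentation.
			DataInputConfig: aws.String(`{"input0": [1, 3, 224, 224]}`),
		},
		OutputConfig: &sagemaker.OutputConfig{
			S3OutputLocation: aws.String("s3://my-bucket/compiled/"), // placeholder
			TargetDevice:     aws.String("coreml"),                   // new target device added in this release
		},
		StoppingCondition: &sagemaker.StoppingCondition{
			MaxRuntimeInSeconds: aws.Int64(900),
		},
	})
	if err != nil {
		fmt.Println("CreateCompilationJob failed:", err)
	}
}
```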

" } }, "DataProcessing": { diff --git a/models/apis/sns/2010-03-31/docs-2.json b/models/apis/sns/2010-03-31/docs-2.json index d1a76172f0b..7909ebd3860 100644 --- a/models/apis/sns/2010-03-31/docs-2.json +++ b/models/apis/sns/2010-03-31/docs-2.json @@ -674,7 +674,7 @@ "base": null, "refs": { "CheckIfPhoneNumberIsOptedOutResponse$isOptedOut": "

Indicates whether the phone number is opted out:

", - "SubscribeInput$ReturnSubscriptionArn": "

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

The default value is false.

" + "SubscribeInput$ReturnSubscriptionArn": "

Sets whether the response from the Subscribe request includes the subscription ARN, even if the subscription is not yet confirmed.

If you set this parameter to true, the response includes the ARN in all cases, even if the subscription is not yet confirmed. In addition to the ARN for confirmed subscriptions, the response also includes the pending subscription ARN value for subscriptions that aren't yet confirmed. A subscription becomes confirmed when the subscriber calls the ConfirmSubscription action with a confirmation token.

The default value is false.
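A brief Go sketch of setting the flag on Subscribe follows; the topic ARN and endpoint are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.Must(session.NewSession()))

	out, err := svc.Subscribe(&sns.SubscribeInput{
		TopicArn:              aws.String("arn:aws:sns:us-east-1:111122223333:my-topic"), // placeholder topic
		Protocol:              aws.String("email"),
		Endpoint:              aws.String("user@example.com"), // placeholder endpoint
		ReturnSubscriptionArn: aws.Bool(true),                 // return the ARN even while the subscription is pending
	})
	if err != nil {
		fmt.Println("Subscribe failed:", err)
		return
	}

	// With the default (false), an unconfirmed subscription reports
	// "pending confirmation" until ConfirmSubscription is called.
	fmt.Println("SubscriptionArn:", aws.StringValue(out.SubscriptionArn))
}
```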

" } }, "delegate": { diff --git a/service/cloudwatchevents/api.go b/service/cloudwatchevents/api.go index 66aa4bf0cec..0081e438bfd 100644 --- a/service/cloudwatchevents/api.go +++ b/service/cloudwatchevents/api.go @@ -3653,6 +3653,44 @@ func (s DeactivateEventSourceOutput) GoString() string { return s.String() } +// A DeadLetterConfig object that contains information about a dead-letter queue +// configuration. +type DeadLetterConfig struct { + _ struct{} `type:"structure"` + + // The ARN of the SQS queue specified as the target for the dead-letter queue. + Arn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeadLetterConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeadLetterConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeadLetterConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeadLetterConfig"} + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *DeadLetterConfig) SetArn(v string) *DeadLetterConfig { + s.Arn = &v + return s +} + type DeleteEventBusInput struct { _ struct{} `type:"structure"` @@ -7456,6 +7494,54 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } +// A RetryPolicy object that includes information about the retry policy settings. +type RetryPolicy struct { + _ struct{} `type:"structure"` + + // The maximum amount of time, in seconds, to continue to make retry attempts. + MaximumEventAgeInSeconds *int64 `min:"60" type:"integer"` + + // The maximum number of retry attempts to make before the request fails. Retry + // attempts continue until either the maximum number of attempts is made or + // until the duration of the MaximumEventAgeInSeconds is met. + MaximumRetryAttempts *int64 `type:"integer"` +} + +// String returns the string representation +func (s RetryPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetryPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetryPolicy"} + if s.MaximumEventAgeInSeconds != nil && *s.MaximumEventAgeInSeconds < 60 { + invalidParams.Add(request.NewErrParamMinValue("MaximumEventAgeInSeconds", 60)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumEventAgeInSeconds sets the MaximumEventAgeInSeconds field's value. +func (s *RetryPolicy) SetMaximumEventAgeInSeconds(v int64) *RetryPolicy { + s.MaximumEventAgeInSeconds = &v + return s +} + +// SetMaximumRetryAttempts sets the MaximumRetryAttempts field's value. +func (s *RetryPolicy) SetMaximumRetryAttempts(v int64) *RetryPolicy { + s.MaximumRetryAttempts = &v + return s +} + // Contains information about a rule in Amazon EventBridge. type Rule struct { _ struct{} `type:"structure"` @@ -7855,6 +7941,10 @@ type Target struct { // in the AWS Batch User Guide. BatchParameters *BatchParameters `type:"structure"` + // The DeadLetterConfig that defines the target queue to send dead-letter queue + // events to. 
+ DeadLetterConfig *DeadLetterConfig `type:"structure"` + // Contains the Amazon ECS task definition and task count to be used, if the // event target is an Amazon ECS task. For more information about Amazon ECS // tasks, see Task Definitions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) @@ -7902,6 +7992,10 @@ type Target struct { // events. RedshiftDataParameters *RedshiftDataParameters `type:"structure"` + // The RetryPolicy object that contains the retry policy configuration to use + // for the dead-letter queue. + RetryPolicy *RetryPolicy `type:"structure"` + // The Amazon Resource Name (ARN) of the IAM role to be used for this target // when the rule is triggered. If one rule triggers multiple targets, you can // use a different IAM role for each target. @@ -7950,6 +8044,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("BatchParameters", err.(request.ErrInvalidParams)) } } + if s.DeadLetterConfig != nil { + if err := s.DeadLetterConfig.Validate(); err != nil { + invalidParams.AddNested("DeadLetterConfig", err.(request.ErrInvalidParams)) + } + } if s.EcsParameters != nil { if err := s.EcsParameters.Validate(); err != nil { invalidParams.AddNested("EcsParameters", err.(request.ErrInvalidParams)) @@ -7970,6 +8069,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("RedshiftDataParameters", err.(request.ErrInvalidParams)) } } + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } + } if s.RunCommandParameters != nil { if err := s.RunCommandParameters.Validate(); err != nil { invalidParams.AddNested("RunCommandParameters", err.(request.ErrInvalidParams)) @@ -7994,6 +8098,12 @@ func (s *Target) SetBatchParameters(v *BatchParameters) *Target { return s } +// SetDeadLetterConfig sets the DeadLetterConfig field's value. +func (s *Target) SetDeadLetterConfig(v *DeadLetterConfig) *Target { + s.DeadLetterConfig = v + return s +} + // SetEcsParameters sets the EcsParameters field's value. func (s *Target) SetEcsParameters(v *EcsParameters) *Target { s.EcsParameters = v @@ -8042,6 +8152,12 @@ func (s *Target) SetRedshiftDataParameters(v *RedshiftDataParameters) *Target { return s } +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *Target) SetRetryPolicy(v *RetryPolicy) *Target { + s.RetryPolicy = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *Target) SetRoleArn(v string) *Target { s.RoleArn = &v diff --git a/service/costexplorer/api.go b/service/costexplorer/api.go index ffd4832b8b1..3f263b9c27e 100644 --- a/service/costexplorer/api.go +++ b/service/costexplorer/api.go @@ -3308,6 +3308,10 @@ type CostCategory struct { // Name is a required field Name *string `min:"1" type:"string" required:"true"` + // The list of processing statuses for Cost Management products for a specific + // cost category. + ProcessingStatus []*CostCategoryProcessingStatus `type:"list"` + // The rule schema version in this particular Cost Category. // // RuleVersion is a required field @@ -3355,6 +3359,12 @@ func (s *CostCategory) SetName(v string) *CostCategory { return s } +// SetProcessingStatus sets the ProcessingStatus field's value. +func (s *CostCategory) SetProcessingStatus(v []*CostCategoryProcessingStatus) *CostCategory { + s.ProcessingStatus = v + return s +} + // SetRuleVersion sets the RuleVersion field's value. 
func (s *CostCategory) SetRuleVersion(v string) *CostCategory { s.RuleVersion = &v @@ -3367,6 +3377,40 @@ func (s *CostCategory) SetRules(v []*CostCategoryRule) *CostCategory { return s } +// The list of processing statuses for Cost Management products for a specific +// cost category. +type CostCategoryProcessingStatus struct { + _ struct{} `type:"structure"` + + // The Cost Management product name of the applied status. + Component *string `type:"string" enum:"CostCategoryStatusComponent"` + + // The process status for a specific cost category. + Status *string `type:"string" enum:"CostCategoryStatus"` +} + +// String returns the string representation +func (s CostCategoryProcessingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CostCategoryProcessingStatus) GoString() string { + return s.String() +} + +// SetComponent sets the Component field's value. +func (s *CostCategoryProcessingStatus) SetComponent(v string) *CostCategoryProcessingStatus { + s.Component = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CostCategoryProcessingStatus) SetStatus(v string) *CostCategoryProcessingStatus { + s.Status = &v + return s +} + // A reference to a Cost Category containing only enough information to identify // the Cost Category. // @@ -3389,6 +3433,13 @@ type CostCategoryReference struct { // The number of rules associated with a specific Cost Category. NumberOfRules *int64 `type:"integer"` + + // The list of processing statuses for Cost Management products for a specific + // cost category. + ProcessingStatus []*CostCategoryProcessingStatus `type:"list"` + + // A list of unique cost category values in a specific cost category. + Values []*string `type:"list"` } // String returns the string representation @@ -3431,6 +3482,18 @@ func (s *CostCategoryReference) SetNumberOfRules(v int64) *CostCategoryReference return s } +// SetProcessingStatus sets the ProcessingStatus field's value. +func (s *CostCategoryReference) SetProcessingStatus(v []*CostCategoryProcessingStatus) *CostCategoryReference { + s.ProcessingStatus = v + return s +} + +// SetValues sets the Values field's value. +func (s *CostCategoryReference) SetValues(v []*string) *CostCategoryReference { + s.Values = v + return s +} + // Rules are processed in order. If there are multiple rules that match the // line item, then the first rule to match is used to determine that Cost Category // value. @@ -3513,6 +3576,11 @@ type CostCategoryValues struct { // The unique name of the Cost Category. Key *string `min:"1" type:"string"` + // The match options that you can use to filter your results. MatchOptions is + // only applicable for only applicable for actions related to cost category. + // The default values for MatchOptions is EQUALS and CASE_SENSITIVE. + MatchOptions []*string `type:"list"` + // The specific value of the Cost Category. Values []*string `type:"list"` } @@ -3546,6 +3614,12 @@ func (s *CostCategoryValues) SetKey(v string) *CostCategoryValues { return s } +// SetMatchOptions sets the MatchOptions field's value. +func (s *CostCategoryValues) SetMatchOptions(v []*string) *CostCategoryValues { + s.MatchOptions = v + return s +} + // SetValues sets the Values field's value. 
func (s *CostCategoryValues) SetValues(v []*string) *CostCategoryValues { s.Values = v @@ -11523,6 +11597,34 @@ func CostCategoryRuleVersion_Values() []string { } } +const ( + // CostCategoryStatusProcessing is a CostCategoryStatus enum value + CostCategoryStatusProcessing = "PROCESSING" + + // CostCategoryStatusApplied is a CostCategoryStatus enum value + CostCategoryStatusApplied = "APPLIED" +) + +// CostCategoryStatus_Values returns all elements of the CostCategoryStatus enum +func CostCategoryStatus_Values() []string { + return []string{ + CostCategoryStatusProcessing, + CostCategoryStatusApplied, + } +} + +const ( + // CostCategoryStatusComponentCostExplorer is a CostCategoryStatusComponent enum value + CostCategoryStatusComponentCostExplorer = "COST_EXPLORER" +) + +// CostCategoryStatusComponent_Values returns all elements of the CostCategoryStatusComponent enum +func CostCategoryStatusComponent_Values() []string { + return []string{ + CostCategoryStatusComponentCostExplorer, + } +} + const ( // DimensionAz is a Dimension enum value DimensionAz = "AZ" diff --git a/service/ec2/api.go b/service/ec2/api.go index c5b880d18cc..a0bb5c159dd 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -37294,16 +37294,22 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI output = &RevokeSecurityGroupEgressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud. // // [VPC only] Removes the specified egress rules from a security group for EC2-VPC. -// This action doesn't apply to security groups for use in EC2-Classic. To remove -// a rule, the values that you specify (for example, ports) must match the existing -// rule's values exactly. +// This action does not apply to security groups for use in EC2-Classic. To +// remove a rule, the values that you specify (for example, ports) must match +// the existing rule's values exactly. +// +// [Default VPC] If the values you specify do not match the existing rule's +// values, no error is returned, and the output describes the security group +// rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source // security group. For the TCP and UDP protocols, you must also specify the @@ -37381,7 +37387,6 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres output = &RevokeSecurityGroupIngressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -37391,9 +37396,12 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres // the values that you specify (for example, ports) must match the existing // rule's values exactly. // -// [EC2-Classic only] If the values you specify do not match the existing rule's -// values, no error is returned. Use DescribeSecurityGroups to verify that the -// rule has been removed. +// [EC2-Classic , default VPC] If the values you specify do not match the existing +// rule's values, no error is returned, and the output describes the security +// group rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. 
// // Each rule consists of the protocol and the CIDR range or source security // group. For the TCP and UDP protocols, you must also specify the destination @@ -102559,6 +102567,13 @@ func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroup type RevokeSecurityGroupEgressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The outbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -102571,6 +102586,18 @@ func (s RevokeSecurityGroupEgressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupEgressOutput) SetReturn(v bool) *RevokeSecurityGroupEgressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupEgressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressOutput { + s.UnknownIpPermissions = v + return s +} + type RevokeSecurityGroupIngressInput struct { _ struct{} `type:"structure"` @@ -102698,6 +102725,13 @@ func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGrou type RevokeSecurityGroupIngressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The inbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -102710,6 +102744,18 @@ func (s RevokeSecurityGroupIngressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupIngressOutput) SetReturn(v bool) *RevokeSecurityGroupIngressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupIngressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressOutput { + s.UnknownIpPermissions = v + return s +} + // Describes a route in a route table. type Route struct { _ struct{} `type:"structure"` diff --git a/service/eventbridge/api.go b/service/eventbridge/api.go index 3f93ecf0b48..e5bc03a8db3 100644 --- a/service/eventbridge/api.go +++ b/service/eventbridge/api.go @@ -3653,6 +3653,44 @@ func (s DeactivateEventSourceOutput) GoString() string { return s.String() } +// A DeadLetterConfig object that contains information about a dead-letter queue +// configuration. +type DeadLetterConfig struct { + _ struct{} `type:"structure"` + + // The ARN of the SQS queue specified as the target for the dead-letter queue. + Arn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeadLetterConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeadLetterConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeadLetterConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeadLetterConfig"} + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *DeadLetterConfig) SetArn(v string) *DeadLetterConfig { + s.Arn = &v + return s +} + type DeleteEventBusInput struct { _ struct{} `type:"structure"` @@ -7456,6 +7494,54 @@ func (s *ResourceNotFoundException) RequestID() string { return s.RespMetadata.RequestID } +// A RetryPolicy object that includes information about the retry policy settings. +type RetryPolicy struct { + _ struct{} `type:"structure"` + + // The maximum amount of time, in seconds, to continue to make retry attempts. + MaximumEventAgeInSeconds *int64 `min:"60" type:"integer"` + + // The maximum number of retry attempts to make before the request fails. Retry + // attempts continue until either the maximum number of attempts is made or + // until the duration of the MaximumEventAgeInSeconds is met. + MaximumRetryAttempts *int64 `type:"integer"` +} + +// String returns the string representation +func (s RetryPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetryPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetryPolicy"} + if s.MaximumEventAgeInSeconds != nil && *s.MaximumEventAgeInSeconds < 60 { + invalidParams.Add(request.NewErrParamMinValue("MaximumEventAgeInSeconds", 60)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumEventAgeInSeconds sets the MaximumEventAgeInSeconds field's value. +func (s *RetryPolicy) SetMaximumEventAgeInSeconds(v int64) *RetryPolicy { + s.MaximumEventAgeInSeconds = &v + return s +} + +// SetMaximumRetryAttempts sets the MaximumRetryAttempts field's value. +func (s *RetryPolicy) SetMaximumRetryAttempts(v int64) *RetryPolicy { + s.MaximumRetryAttempts = &v + return s +} + // Contains information about a rule in Amazon EventBridge. type Rule struct { _ struct{} `type:"structure"` @@ -7855,6 +7941,10 @@ type Target struct { // in the AWS Batch User Guide. BatchParameters *BatchParameters `type:"structure"` + // The DeadLetterConfig that defines the target queue to send dead-letter queue + // events to. + DeadLetterConfig *DeadLetterConfig `type:"structure"` + // Contains the Amazon ECS task definition and task count to be used, if the // event target is an Amazon ECS task. For more information about Amazon ECS // tasks, see Task Definitions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) @@ -7902,6 +7992,10 @@ type Target struct { // events. RedshiftDataParameters *RedshiftDataParameters `type:"structure"` + // The RetryPolicy object that contains the retry policy configuration to use + // for the dead-letter queue. + RetryPolicy *RetryPolicy `type:"structure"` + // The Amazon Resource Name (ARN) of the IAM role to be used for this target // when the rule is triggered. If one rule triggers multiple targets, you can // use a different IAM role for each target. 
@@ -7950,6 +8044,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("BatchParameters", err.(request.ErrInvalidParams)) } } + if s.DeadLetterConfig != nil { + if err := s.DeadLetterConfig.Validate(); err != nil { + invalidParams.AddNested("DeadLetterConfig", err.(request.ErrInvalidParams)) + } + } if s.EcsParameters != nil { if err := s.EcsParameters.Validate(); err != nil { invalidParams.AddNested("EcsParameters", err.(request.ErrInvalidParams)) @@ -7970,6 +8069,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("RedshiftDataParameters", err.(request.ErrInvalidParams)) } } + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } + } if s.RunCommandParameters != nil { if err := s.RunCommandParameters.Validate(); err != nil { invalidParams.AddNested("RunCommandParameters", err.(request.ErrInvalidParams)) @@ -7994,6 +8098,12 @@ func (s *Target) SetBatchParameters(v *BatchParameters) *Target { return s } +// SetDeadLetterConfig sets the DeadLetterConfig field's value. +func (s *Target) SetDeadLetterConfig(v *DeadLetterConfig) *Target { + s.DeadLetterConfig = v + return s +} + // SetEcsParameters sets the EcsParameters field's value. func (s *Target) SetEcsParameters(v *EcsParameters) *Target { s.EcsParameters = v @@ -8042,6 +8152,12 @@ func (s *Target) SetRedshiftDataParameters(v *RedshiftDataParameters) *Target { return s } +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *Target) SetRetryPolicy(v *RetryPolicy) *Target { + s.RetryPolicy = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *Target) SetRoleArn(v string) *Target { s.RoleArn = &v diff --git a/service/rds/api.go b/service/rds/api.go index 84cc40469ca..fe5f2c89b80 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -15563,9 +15563,15 @@ func (s *CharacterSet) SetCharacterSetName(v string) *CharacterSet { // // The EnableLogTypes and DisableLogTypes arrays determine which logs will be // exported (or not exported) to CloudWatch Logs. The values within these arrays -// depend on the DB engine being used. For more information, see Publishing -// Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) +// depend on the DB engine being used. +// +// For more information about exporting CloudWatch Logs for Amazon RDS DB instances, +// see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon RDS User Guide. +// +// For more information about exporting CloudWatch Logs for Amazon Aurora DB +// clusters, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) +// in the Amazon Aurora User Guide. type CloudwatchLogsExportConfiguration struct { _ struct{} `type:"structure"` @@ -16483,21 +16489,12 @@ func (s *CopyDBSnapshotOutput) SetDBSnapshot(v *DBSnapshot) *CopyDBSnapshotOutpu type CopyOptionGroupInput struct { _ struct{} `type:"structure"` - // The identifier or ARN for the source option group. 
For information about - // creating an ARN, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) - // in the Amazon RDS User Guide. + // The identifier for the source option group. // // Constraints: // // * Must specify a valid option group. // - // * If the source option group is in the same AWS Region as the copy, specify - // a valid option group identifier, for example my-option-group, or a valid - // ARN. - // - // * If the source option group is in a different AWS Region than the copy, - // specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. - // // SourceOptionGroupIdentifier is a required field SourceOptionGroupIdentifier *string `type:"string" required:"true"` @@ -18037,6 +18034,10 @@ type CreateDBInstanceInput struct { // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon Relational Database Service User Guide. // + // Amazon Aurora + // + // Not applicable. CloudWatch Logs exports are managed by the DB cluster. + // // MariaDB // // Possible values are audit, error, general, and slowquery. @@ -18153,7 +18154,7 @@ type CreateDBInstanceInput struct { // // Microsoft SQL Server // - // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // See Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) // in the Amazon RDS User Guide. // // MySQL @@ -31602,7 +31603,7 @@ type ImportInstallationMediaInput struct { // // Microsoft SQL Server // - // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // See Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) // in the Amazon RDS User Guide. // // EngineVersion is a required field @@ -38940,8 +38941,8 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // The list of logs that the restored DB instance is to export to CloudWatch // Logs. The values in the list depend on the DB engine being used. For more - // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - // in the Amazon Aurora User Guide. + // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` // A value that indicates whether to enable mapping of AWS Identity and Access @@ -39458,6 +39459,10 @@ type RestoreDBInstanceFromS3Input struct { // * Can't be a reserved word for the chosen database engine. MasterUsername *string `type:"string"` + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. 
+ MaxAllocatedStorage *int64 `type:"integer"` + // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. To disable collecting Enhanced Monitoring // metrics, specify 0. @@ -39793,6 +39798,12 @@ func (s *RestoreDBInstanceFromS3Input) SetMasterUsername(v string) *RestoreDBIns return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *RestoreDBInstanceFromS3Input) SetMaxAllocatedStorage(v int64) *RestoreDBInstanceFromS3Input { + s.MaxAllocatedStorage = &v + return s +} + // SetMonitoringInterval sets the MonitoringInterval field's value. func (s *RestoreDBInstanceFromS3Input) SetMonitoringInterval(v int64) *RestoreDBInstanceFromS3Input { s.MonitoringInterval = &v @@ -40084,6 +40095,10 @@ type RestoreDBInstanceToPointInTimeInput struct { // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. + MaxAllocatedStorage *int64 `type:"integer"` + // A value that indicates whether the DB instance is a Multi-AZ deployment. // // Constraint: You can't specify the AvailabilityZone parameter if the DB instance @@ -40309,6 +40324,12 @@ func (s *RestoreDBInstanceToPointInTimeInput) SetLicenseModel(v string) *Restore return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *RestoreDBInstanceToPointInTimeInput) SetMaxAllocatedStorage(v int64) *RestoreDBInstanceToPointInTimeInput { + s.MaxAllocatedStorage = &v + return s +} + // SetMultiAZ sets the MultiAZ field's value. func (s *RestoreDBInstanceToPointInTimeInput) SetMultiAZ(v bool) *RestoreDBInstanceToPointInTimeInput { s.MultiAZ = &v diff --git a/service/rekognition/api.go b/service/rekognition/api.go index 390758dc172..28579d30906 100644 --- a/service/rekognition/api.go +++ b/service/rekognition/api.go @@ -89,10 +89,6 @@ func (c *Rekognition) CompareFacesRequest(input *CompareFacesInput) (req *reques // LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify // NONE. The default value is NONE. // -// To use quality filtering, you need a collection associated with version 3 -// of the face model or higher. To get the version of the face model associated -// with a collection, call DescribeCollection. -// // If the image doesn't contain Exif metadata, CompareFaces returns orientation // information for the source and target images. Use these values to display // the images with the correct image orientation. @@ -4619,10 +4615,10 @@ func (c *Rekognition) RecognizeCelebritiesRequest(input *RecognizeCelebritiesInp // Returns an array of celebrities recognized in the input image. For more information, // see Recognizing Celebrities in the Amazon Rekognition Developer Guide. // -// RecognizeCelebrities returns the 100 largest faces in the image. It lists +// RecognizeCelebrities returns the 64 largest faces in the image. It lists // recognized celebrities in the CelebrityFaces array and unrecognized faces // in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities -// whose faces aren't among the largest 100 faces in the image. +// whose faces aren't among the largest 64 faces in the image. // // For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. 
// The Celebrity object contains the celebrity name, ID, URL links to additional @@ -6465,11 +6461,13 @@ func (s *AgeRange) SetLow(v int64) *AgeRange { } // Assets are the images that you use to train and evaluate a model version. -// Assets are referenced by Sagemaker GroundTruth manifest files. +// Assets can also contain validation information that you use to debug a failed +// model training. type Asset struct { _ struct{} `type:"structure"` - // The S3 bucket that contains the Ground Truth manifest file. + // The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest + // file. GroundTruthManifest *GroundTruthManifest `type:"structure"` } @@ -6515,7 +6513,7 @@ type AudioMetadata struct { // The duration of the audio stream in milliseconds. DurationMillis *int64 `type:"long"` - // The number of audio channels in the segement. + // The number of audio channels in the segment. NumberOfChannels *int64 `type:"long"` // The sample rate for the audio stream. @@ -10740,7 +10738,10 @@ type GetSegmentDetectionOutput struct { // You can use this pagination token to retrieve the next set of text. NextToken *string `type:"string"` - // An array of segments detected in a video. + // An array of segments detected in a video. The array is sorted by the segment + // types (TECHNICAL_CUE or SHOT) specified in the SegmentTypes input parameter + // of StartSegmentDetection. Within each segment type the array is sorted by + // timestamp values. Segments []*SegmentDetection `type:"list"` // An array containing the segment types requested in the call to StartSegmentDetection. @@ -10948,7 +10949,8 @@ func (s *GetTextDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetTextDete return s } -// The S3 bucket that contains the Ground Truth manifest file. +// The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest +// file. type GroundTruthManifest struct { _ struct{} `type:"structure"` @@ -12108,14 +12110,16 @@ type Landmark struct { // Type of landmark. Type *string `type:"string" enum:"LandmarkType"` - // The x-coordinate from the top left of the landmark expressed as the ratio - // of the width of the image. For example, if the image is 700 x 200 and the - // x-coordinate of the landmark is at 350 pixels, this value is 0.5. + // The x-coordinate of the landmark expressed as a ratio of the width of the + // image. The x-coordinate is measured from the left-side of the image. For + // example, if the image is 700 pixels wide and the x-coordinate of the landmark + // is at 350 pixels, this value is 0.5. X *float64 `type:"float"` - // The y-coordinate from the top left of the landmark expressed as the ratio - // of the height of the image. For example, if the image is 700 x 200 and the - // y-coordinate of the landmark is at 100 pixels, this value is 0.5. + // The y-coordinate of the landmark expressed as a ratio of the height of the + // image. The y-coordinate is measured from the top of the image. For example, + // if the image height is 200 pixels and the y-coordinate of the landmark is + // at 50 pixels, this value is 0.25. Y *float64 `type:"float"` } @@ -12987,6 +12991,10 @@ type ProjectVersionDescription struct { // The training results. EvaluationResult is only returned if training is successful. EvaluationResult *EvaluationResult `type:"structure"` + // The location of the summary manifest. The summary manifest provides aggregate + // data validation results for the training and test datasets. 
+ ManifestSummary *GroundTruthManifest `type:"structure"` + // The minimum number of inference units used by the model. For more information, // see StartProjectVersion. MinInferenceUnits *int64 `min:"1" type:"integer"` @@ -13003,10 +13011,10 @@ type ProjectVersionDescription struct { // A descriptive message for an error or warning that occurred. StatusMessage *string `type:"string"` - // The manifest file that represents the testing results. + // Contains information about the testing results. TestingDataResult *TestingDataResult `type:"structure"` - // The manifest file that represents the training results. + // Contains information about the training results. TrainingDataResult *TrainingDataResult `type:"structure"` // The Unix date and time that training of the model ended. @@ -13041,6 +13049,12 @@ func (s *ProjectVersionDescription) SetEvaluationResult(v *EvaluationResult) *Pr return s } +// SetManifestSummary sets the ManifestSummary field's value. +func (s *ProjectVersionDescription) SetManifestSummary(v *GroundTruthManifest) *ProjectVersionDescription { + s.ManifestSummary = v + return s +} + // SetMinInferenceUnits sets the MinInferenceUnits field's value. func (s *ProjectVersionDescription) SetMinInferenceUnits(v int64) *ProjectVersionDescription { s.MinInferenceUnits = &v @@ -13199,7 +13213,7 @@ type RecognizeCelebritiesOutput struct { _ struct{} `type:"structure"` // Details about each celebrity found in the image. Amazon Rekognition can detect - // a maximum of 15 celebrities in an image. + // a maximum of 64 celebrities in an image. CelebrityFaces []*Celebrity `type:"list"` // The orientation of the input image (counterclockwise direction). If your @@ -13872,7 +13886,7 @@ type SegmentDetection struct { EndTimecodeSMPTE *string `type:"string"` // The end time of the detected segment, in milliseconds, from the start of - // the video. + // the video. This value is rounded down. EndTimestampMillis *int64 `type:"long"` // If the segment is a shot detection, contains information about the shot detection. @@ -13884,7 +13898,9 @@ type SegmentDetection struct { StartTimecodeSMPTE *string `type:"string"` // The start time of the detected segment in milliseconds from the start of - // the video. + // the video. This value is rounded down. For example, if the actual timestamp + // is 100.6667 milliseconds, Amazon Rekognition Video returns a value of 100 + // millis. StartTimestampMillis *int64 `type:"long"` // If the segment is a technical cue, contains information about the technical @@ -14002,7 +14018,7 @@ type ShotSegment struct { // segment. Confidence *float64 `min:"50" type:"float"` - // An Identifier for a shot detection segment detected in a video + // An Identifier for a shot detection segment detected in a video. Index *int64 `type:"long"` } @@ -15812,8 +15828,8 @@ func (s *TestingData) SetAutoCreate(v bool) *TestingData { return s } -// A Sagemaker Groundtruth format manifest file representing the dataset used -// for testing. +// Sagemaker Groundtruth format manifest files for the input, output and validation +// datasets that are used and created during testing. type TestingDataResult struct { _ struct{} `type:"structure"` @@ -15823,6 +15839,10 @@ type TestingDataResult struct { // The subset of the dataset that was actually tested. Some images (assets) // might not be tested due to file formatting and other issues. Output *TestingData `type:"structure"` + + // The location of the data validation manifest. 
The data validation manifest + // is created for the test dataset during model training. + Validation *ValidationData `type:"structure"` } // String returns the string representation @@ -15847,6 +15867,12 @@ func (s *TestingDataResult) SetOutput(v *TestingData) *TestingDataResult { return s } +// SetValidation sets the Validation field's value. +func (s *TestingDataResult) SetValidation(v *ValidationData) *TestingDataResult { + s.Validation = v + return s +} + // Information about a word or line of text detected by DetectText. // // The DetectedText field contains the text that Amazon Rekognition detected @@ -16069,8 +16095,8 @@ func (s *TrainingData) SetAssets(v []*Asset) *TrainingData { return s } -// A Sagemaker Groundtruth format manifest file that represents the dataset -// used for training. +// Sagemaker Groundtruth format manifest files for the input, output and validation +// datasets that are used and created during testing. type TrainingDataResult struct { _ struct{} `type:"structure"` @@ -16080,6 +16106,10 @@ type TrainingDataResult struct { // The images (assets) that were actually trained by Amazon Rekognition Custom // Labels. Output *TrainingData `type:"structure"` + + // The location of the data validation manifest. The data validation manifest + // is created for the training dataset during model training. + Validation *ValidationData `type:"structure"` } // String returns the string representation @@ -16104,6 +16134,12 @@ func (s *TrainingDataResult) SetOutput(v *TrainingData) *TrainingDataResult { return s } +// SetValidation sets the Validation field's value. +func (s *TrainingDataResult) SetValidation(v *ValidationData) *TrainingDataResult { + s.Validation = v + return s +} + // A face that IndexFaces detected, but didn't index. Use the Reasons response // attribute to determine why a face wasn't indexed. type UnindexedFace struct { @@ -16153,6 +16189,42 @@ func (s *UnindexedFace) SetReasons(v []*string) *UnindexedFace { return s } +// Contains the Amazon S3 bucket location of the validation data for a model +// training job. +// +// The validation data includes error information for individual JSON lines +// in the dataset. For more information, see Debugging a Failed Model Training +// in the Amazon Rekognition Custom Labels Developer Guide. +// +// You get the ValidationData object for the training dataset (TrainingDataResult) +// and the test dataset (TestingDataResult) by calling DescribeProjectVersions. +// +// The assets array contains a single Asset object. The GroundTruthManifest +// field of the Asset object contains the S3 bucket location of the validation +// data. +type ValidationData struct { + _ struct{} `type:"structure"` + + // The assets that comprise the validation data. + Assets []*Asset `type:"list"` +} + +// String returns the string representation +func (s ValidationData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationData) GoString() string { + return s.String() +} + +// SetAssets sets the Assets field's value. +func (s *ValidationData) SetAssets(v []*Asset) *ValidationData { + s.Assets = v + return s +} + // Video file stored in an Amazon S3 bucket. Amazon Rekognition video start // operations such as StartLabelDetection use Video to specify a video for analysis. // The supported file formats are .mp4, .mov and .avi. 
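
The Rekognition hunks above add `ManifestSummary` to `ProjectVersionDescription` and a `Validation` field (of the new `ValidationData` type) to both `TrainingDataResult` and `TestingDataResult`, so `DescribeProjectVersions` can now surface the manifest files created to debug a failed Custom Labels training job. The sketch below shows one way to read those new fields with this SDK version; it is only an illustration, and the region, project ARN, and any bucket contents are hypothetical placeholders rather than values taken from the diff.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rekognition"
)

func main() {
	// Assumed region and project ARN; substitute your own Custom Labels project.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := rekognition.New(sess)

	out, err := svc.DescribeProjectVersions(&rekognition.DescribeProjectVersionsInput{
		ProjectArn: aws.String("arn:aws:rekognition:us-east-1:111111111111:project/my-project/1234567890123"),
	})
	if err != nil {
		fmt.Println("DescribeProjectVersions failed:", err)
		return
	}

	for _, v := range out.ProjectVersionDescriptions {
		// ManifestSummary points at the aggregate validation results for the
		// training and test datasets.
		if v.ManifestSummary != nil && v.ManifestSummary.S3Object != nil {
			fmt.Printf("summary manifest: s3://%s/%s\n",
				aws.StringValue(v.ManifestSummary.S3Object.Bucket),
				aws.StringValue(v.ManifestSummary.S3Object.Name))
		}
		// Validation on the training result holds the per-JSON-line error details
		// described in the ValidationData doc comment above.
		if v.TrainingDataResult != nil && v.TrainingDataResult.Validation != nil {
			for _, a := range v.TrainingDataResult.Validation.Assets {
				if a.GroundTruthManifest != nil && a.GroundTruthManifest.S3Object != nil {
					fmt.Printf("training validation manifest: s3://%s/%s\n",
						aws.StringValue(a.GroundTruthManifest.S3Object.Bucket),
						aws.StringValue(a.GroundTruthManifest.S3Object.Name))
				}
			}
		}
	}
}
```

The same pattern applies to `TestingDataResult.Validation` for the test dataset.
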
diff --git a/service/sagemaker/api.go b/service/sagemaker/api.go index c841c706881..57a9f9b0aff 100644 --- a/service/sagemaker/api.go +++ b/service/sagemaker/api.go @@ -688,10 +688,10 @@ func (c *SageMaker) CreateDomainRequest(input *CreateDomainInput) (req *request. // VPC configuration // // All SageMaker Studio traffic between the domain and the EFS volume is through -// the specified VPC and subnets. For other Studio traffic, you specify the -// AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the VPC -// mode that's chosen when you onboard to Studio. The following options are -// available: +// the specified VPC and subnets. For other Studio traffic, you can specify +// the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the +// network access type that you choose when you onboard to Studio. The following +// options are available: // // * PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon // SageMaker, which allows internet access. This is the default value. @@ -703,9 +703,9 @@ func (c *SageMaker) CreateDomainRequest(input *CreateDomainInput) (req *request. // (PrivateLink) or a NAT gateway and your security groups allow outbound // connections. // -// VpcOnly mode +// VpcOnly network access type // -// When you specify VpcOnly, you must specify the following: +// When you choose VpcOnly, you must specify the following: // // * Security group inbound and outbound rules to allow NFS traffic over // TCP on port 2049 between the domain and the EFS volume @@ -31879,6 +31879,52 @@ type InputConfig struct { // // * XGBOOST: input data name and shape are not needed. // + // DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice + // (ML Model format): + // + // * shape: Input shape, for example {"input_1": {"shape": [1,224,224,3]}}. + // In addition to static input shapes, CoreML converter supports Flexible + // input shapes: Range Dimension. You can use the Range Dimension feature + // if you know the input shape will be within some specific interval in that + // dimension, for example: {"input_1": {"shape": ["1..10", 224, 224, 3]}} + // Enumerated shapes. Sometimes, the models are trained to work only on a + // select set of inputs. You can enumerate all supported input shapes, for + // example: {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}} + // + // * default_shape: Default input shape. You can set a default shape during + // conversion for both Range Dimension and Enumerated Shapes. For example + // {"input_1": {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, + // 224, 3]}} + // + // * type: Input type. Allowed values: Image and Tensor. By default, the + // converter generates an ML Model with inputs of type Tensor (MultiArray). + // User can set input type to be Image. Image input type requires additional + // input parameters such as bias and scale. + // + // * bias: If the input type is an Image, you need to provide the bias vector. + // + // * scale: If the input type is an Image, you need to provide a scale factor. + // + // CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions. + // CoreML converter supports Tensorflow and PyTorch models. 
CoreML conversion + // examples: + // + // * Tensor type input: "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], + // [1,160,160,3]], "default_shape": [1,224,224,3]}} + // + // * Tensor type input without input name (PyTorch): "DataInputConfig": [{"shape": + // [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}] + // + // * Image type input: "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], + // [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image", "bias": + // [-1,-1,-1], "scale": 0.007843137255}} "CompilerOptions": {"class_labels": + // "imagenet_labels_1000.txt"} + // + // * Image type input without input name (PyTorch): "DataInputConfig": [{"shape": + // [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224], "type": + // "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}] "CompilerOptions": + // {"class_labels": "imagenet_labels_1000.txt"} + // // DataInputConfig is a required field DataInputConfig *string `min:"1" type:"string" required:"true"` @@ -39752,7 +39798,7 @@ type OutputConfig struct { // Specifies additional parameters for compiler options in JSON format. The // compiler options are TargetPlatform specific. It is required for NVIDIA accelerators - // and highly recommended for CPU compliations. For any other cases, it is optional + // and highly recommended for CPU compilations. For any other cases, it is optional // to specify CompilerOptions. // // * CPU: Compilation for CPU supports the following compiler options. mcpu: @@ -39775,6 +39821,12 @@ type OutputConfig struct { // levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: // Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit // platform with NEON support. + // + // * CoreML: Compilation for the CoreML OutputConfig$TargetDevice supports + // the following compiler options: class_labels: Specifies the classification + // labels file name inside input tar.gz file. For example, {"class_labels": + // "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated + // by newlines. CompilerOptions *string `min:"7" type:"string"` // Identifies the S3 bucket where you want Amazon SageMaker to store the model @@ -51757,6 +51809,9 @@ const ( // TargetDeviceX86Win64 is a TargetDevice enum value TargetDeviceX86Win64 = "x86_win64" + + // TargetDeviceCoreml is a TargetDevice enum value + TargetDeviceCoreml = "coreml" ) // TargetDevice_Values returns all elements of the TargetDevice enum @@ -51788,6 +51843,7 @@ func TargetDevice_Values() []string { TargetDeviceAmbaCv22, TargetDeviceX86Win32, TargetDeviceX86Win64, + TargetDeviceCoreml, } } diff --git a/service/sns/api.go b/service/sns/api.go index e78534596b5..cc192c9799f 100644 --- a/service/sns/api.go +++ b/service/sns/api.go @@ -6149,12 +6149,12 @@ type SubscribeInput struct { // Sets whether the response from the Subscribe request includes the subscription // ARN, even if the subscription is not yet confirmed. // - // * If you set this parameter to true, the response includes the ARN in - // all cases, even if the subscription is not yet confirmed. In addition - // to the ARN for confirmed subscriptions, the response also includes the - // pending subscription ARN value for subscriptions that aren't yet confirmed. - // A subscription becomes confirmed when the subscriber calls the ConfirmSubscription - // action with a confirmation token. + // If you set this parameter to true, the response includes the ARN in all cases, + // even if the subscription is not yet confirmed. 
In addition to the ARN for + // confirmed subscriptions, the response also includes the pending subscription + // ARN value for subscriptions that aren't yet confirmed. A subscription becomes + // confirmed when the subscriber calls the ConfirmSubscription action with a + // confirmation token. // // The default value is false. ReturnSubscriptionArn *bool `type:"boolean"`
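
The SNS hunk only reflows the `ReturnSubscriptionArn` documentation, but as a usage note, the sketch below exercises the behavior it describes: with `ReturnSubscriptionArn` set to true, `Subscribe` returns an ARN even while the subscription is still pending confirmation. The topic ARN and endpoint are hypothetical placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	// Assumed region; adjust to match your topic.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sns.New(sess)

	// ReturnSubscriptionArn=true makes the response include the ARN in all cases,
	// including the pending-confirmation value for unconfirmed subscriptions.
	out, err := svc.Subscribe(&sns.SubscribeInput{
		TopicArn:              aws.String("arn:aws:sns:us-east-1:111111111111:my-topic"), // hypothetical topic
		Protocol:              aws.String("email"),
		Endpoint:              aws.String("user@example.com"), // hypothetical endpoint
		ReturnSubscriptionArn: aws.Bool(true),
	})
	if err != nil {
		fmt.Println("Subscribe failed:", err)
		return
	}
	fmt.Println("subscription ARN:", aws.StringValue(out.SubscriptionArn))
}
```
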