From 86c1a369580be45a3dd4d89afe7f06baacdeb37c Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Fri, 30 Jun 2023 14:32:33 -0400 Subject: [PATCH] Release v1.44.294 (2023-06-30) (#4900) Release v1.44.294 (2023-06-30) === ### Service Client Updates * `service/amp`: Updates service API and documentation * `service/ecs`: Updates service API and documentation * Added new field "credentialspecs" to the ecs task definition to support gMSA of windows/linux in both domainless and domain-joined mode * `service/ivs`: Updates service API * `service/mediaconvert`: Updates service documentation * This release includes improved color handling of overlays and general updates to user documentation. * `service/sagemaker`: Updates service API and documentation * This release adds support for rolling deployment in SageMaker Inference. * `service/transfer`: Updates service API and documentation * Add outbound Basic authentication support to AS2 connectors * `service/verifiedpermissions`: Updates service documentation --- CHANGELOG.md | 16 + aws/version.go | 2 +- models/apis/amp/2020-08-01/api-2.json | 330 +++++++------- models/apis/amp/2020-08-01/docs-2.json | 8 +- .../amp/2020-08-01/endpoint-rule-set-1.json | 350 ++++++++++++++ .../apis/amp/2020-08-01/endpoint-tests-1.json | 431 ++++++++++++++++++ models/apis/ecs/2014-11-13/api-2.json | 3 +- models/apis/ecs/2014-11-13/docs-2.json | 3 +- models/apis/ivs/2020-07-14/api-2.json | 4 +- .../apis/mediaconvert/2017-08-29/docs-2.json | 16 +- models/apis/sagemaker/2017-07-24/api-2.json | 22 +- models/apis/sagemaker/2017-07-24/docs-2.json | 14 +- models/apis/transfer/2018-11-05/api-2.json | 8 +- models/apis/transfer/2018-11-05/docs-2.json | 18 +- .../2021-12-01/docs-2.json | 10 +- .../2021-12-01/endpoint-tests-1.json | 86 ++-- service/ecs/api.go | 33 ++ service/mediaconvert/api.go | 36 +- service/sagemaker/api.go | 132 +++++- service/transfer/api.go | 74 +++ 
service/verifiedpermissions/api.go | 12 +- 21 files changed, 1334 insertions(+), 274 deletions(-) create mode 100644 models/apis/amp/2020-08-01/endpoint-rule-set-1.json create mode 100644 models/apis/amp/2020-08-01/endpoint-tests-1.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c6f6e60f8a..fc5c5cd54b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.44.294 (2023-06-30) +=== + +### Service Client Updates +* `service/amp`: Updates service API and documentation +* `service/ecs`: Updates service API and documentation + * Added new field "credentialspecs" to the ecs task definition to support gMSA of windows/linux in both domainless and domain-joined mode +* `service/ivs`: Updates service API +* `service/mediaconvert`: Updates service documentation + * This release includes improved color handling of overlays and general updates to user documentation. +* `service/sagemaker`: Updates service API and documentation + * This release adds support for rolling deployment in SageMaker Inference. 
+* `service/transfer`: Updates service API and documentation + * Add outbound Basic authentication support to AS2 connectors +* `service/verifiedpermissions`: Updates service documentation + Release v1.44.293 (2023-06-29) === diff --git a/aws/version.go b/aws/version.go index a84ca15406f..bc221534523 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.293" +const SDKVersion = "1.44.294" diff --git a/models/apis/amp/2020-08-01/api-2.json b/models/apis/amp/2020-08-01/api-2.json index e25d82eac17..98249eb97a5 100644 --- a/models/apis/amp/2020-08-01/api-2.json +++ b/models/apis/amp/2020-08-01/api-2.json @@ -406,16 +406,16 @@ "AlertManagerDefinitionDescription":{ "type":"structure", "required":[ - "createdAt", + "status", "data", - "modifiedAt", - "status" + "createdAt", + "modifiedAt" ], "members":{ - "createdAt":{"shape":"Timestamp"}, + "status":{"shape":"AlertManagerDefinitionStatus"}, "data":{"shape":"AlertManagerDefinitionData"}, - "modifiedAt":{"shape":"Timestamp"}, - "status":{"shape":"AlertManagerDefinitionStatus"} + "createdAt":{"shape":"Timestamp"}, + "modifiedAt":{"shape":"Timestamp"} } }, "AlertManagerDefinitionStatus":{ @@ -458,19 +458,19 @@ "CreateAlertManagerDefinitionRequest":{ "type":"structure", "required":[ - "data", - "workspaceId" + "workspaceId", + "data" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true - }, - "data":{"shape":"AlertManagerDefinitionData"}, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" + }, + "data":{"shape":"AlertManagerDefinitionData"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true } } }, @@ -484,19 +484,19 @@ "CreateLoggingConfigurationRequest":{ "type":"structure", "required":[ - "logGroupArn", - "workspaceId" + "workspaceId", + "logGroupArn" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", 
- "idempotencyToken":true - }, - "logGroupArn":{"shape":"LogGroupArn"}, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" + }, + "logGroupArn":{"shape":"LogGroupArn"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true } } }, @@ -510,35 +510,35 @@ "CreateRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "data", + "workspaceId", "name", - "workspaceId" + "data" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true - }, - "data":{"shape":"RuleGroupsNamespaceData"}, - "name":{"shape":"RuleGroupsNamespaceName"}, - "tags":{"shape":"TagMap"}, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" - } + }, + "name":{"shape":"RuleGroupsNamespaceName"}, + "data":{"shape":"RuleGroupsNamespaceData"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true + }, + "tags":{"shape":"TagMap"} } }, "CreateRuleGroupsNamespaceResponse":{ "type":"structure", "required":[ - "arn", "name", + "arn", "status" ], "members":{ - "arn":{"shape":"RuleGroupsNamespaceArn"}, "name":{"shape":"RuleGroupsNamespaceName"}, + "arn":{"shape":"RuleGroupsNamespaceArn"}, "status":{"shape":"RuleGroupsNamespaceStatus"}, "tags":{"shape":"TagMap"} } @@ -557,31 +557,31 @@ "CreateWorkspaceResponse":{ "type":"structure", "required":[ + "workspaceId", "arn", - "status", - "workspaceId" + "status" ], "members":{ + "workspaceId":{"shape":"WorkspaceId"}, "arn":{"shape":"WorkspaceArn"}, "status":{"shape":"WorkspaceStatus"}, - "tags":{"shape":"TagMap"}, - "workspaceId":{"shape":"WorkspaceId"} + "tags":{"shape":"TagMap"} } }, "DeleteAlertManagerDefinitionRequest":{ "type":"structure", "required":["workspaceId"], "members":{ + "workspaceId":{ + "shape":"WorkspaceId", + "location":"uri", + "locationName":"workspaceId" + }, "clientToken":{ "shape":"IdempotencyToken", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" - }, - "workspaceId":{ 
- "shape":"WorkspaceId", - "location":"uri", - "locationName":"workspaceId" } } }, @@ -589,41 +589,41 @@ "type":"structure", "required":["workspaceId"], "members":{ + "workspaceId":{ + "shape":"WorkspaceId", + "location":"uri", + "locationName":"workspaceId" + }, "clientToken":{ "shape":"IdempotencyToken", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" - }, - "workspaceId":{ - "shape":"WorkspaceId", - "location":"uri", - "locationName":"workspaceId" } } }, "DeleteRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "name", - "workspaceId" + "workspaceId", + "name" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true, - "location":"querystring", - "locationName":"clientToken" + "workspaceId":{ + "shape":"WorkspaceId", + "location":"uri", + "locationName":"workspaceId" }, "name":{ "shape":"RuleGroupsNamespaceName", "location":"uri", "locationName":"name" }, - "workspaceId":{ - "shape":"WorkspaceId", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" } } }, @@ -631,16 +631,16 @@ "type":"structure", "required":["workspaceId"], "members":{ + "workspaceId":{ + "shape":"WorkspaceId", + "location":"uri", + "locationName":"workspaceId" + }, "clientToken":{ "shape":"IdempotencyToken", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" - }, - "workspaceId":{ - "shape":"WorkspaceId", - "location":"uri", - "locationName":"workspaceId" } } }, @@ -683,19 +683,19 @@ "DescribeRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "name", - "workspaceId" + "workspaceId", + "name" ], "members":{ - "name":{ - "shape":"RuleGroupsNamespaceName", - "location":"uri", - "locationName":"name" - }, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + 
"location":"uri", + "locationName":"name" } } }, @@ -754,10 +754,10 @@ "type":"structure", "required":["workspaceId"], "members":{ - "maxResults":{ - "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", - "location":"querystring", - "locationName":"maxResults" + "workspaceId":{ + "shape":"WorkspaceId", + "location":"uri", + "locationName":"workspaceId" }, "name":{ "shape":"RuleGroupsNamespaceName", @@ -769,10 +769,10 @@ "location":"querystring", "locationName":"nextToken" }, - "workspaceId":{ - "shape":"WorkspaceId", - "location":"uri", - "locationName":"workspaceId" + "maxResults":{ + "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", + "location":"querystring", + "locationName":"maxResults" } } }, @@ -786,8 +786,8 @@ "type":"structure", "required":["ruleGroupsNamespaces"], "members":{ - "nextToken":{"shape":"PaginationToken"}, - "ruleGroupsNamespaces":{"shape":"RuleGroupsNamespaceSummaryList"} + "ruleGroupsNamespaces":{"shape":"RuleGroupsNamespaceSummaryList"}, + "nextToken":{"shape":"PaginationToken"} } }, "ListTagsForResourceRequest":{ @@ -810,6 +810,11 @@ "ListWorkspacesRequest":{ "type":"structure", "members":{ + "nextToken":{ + "shape":"PaginationToken", + "location":"querystring", + "locationName":"nextToken" + }, "alias":{ "shape":"WorkspaceAlias", "location":"querystring", @@ -819,11 +824,6 @@ "shape":"ListWorkspacesRequestMaxResultsInteger", "location":"querystring", "locationName":"maxResults" - }, - "nextToken":{ - "shape":"PaginationToken", - "location":"querystring", - "locationName":"nextToken" } } }, @@ -837,29 +837,29 @@ "type":"structure", "required":["workspaces"], "members":{ - "nextToken":{"shape":"PaginationToken"}, - "workspaces":{"shape":"WorkspaceSummaryList"} + "workspaces":{"shape":"WorkspaceSummaryList"}, + "nextToken":{"shape":"PaginationToken"} } }, "LogGroupArn":{ "type":"string", - "pattern":"^arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*$" + 
"pattern":"arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*" }, "LoggingConfigurationMetadata":{ "type":"structure", "required":[ - "createdAt", - "logGroupArn", - "modifiedAt", "status", - "workspace" + "workspace", + "logGroupArn", + "createdAt", + "modifiedAt" ], "members":{ - "createdAt":{"shape":"Timestamp"}, - "logGroupArn":{"shape":"LogGroupArn"}, - "modifiedAt":{"shape":"Timestamp"}, "status":{"shape":"LoggingConfigurationStatus"}, - "workspace":{"shape":"WorkspaceId"} + "workspace":{"shape":"WorkspaceId"}, + "logGroupArn":{"shape":"LogGroupArn"}, + "createdAt":{"shape":"Timestamp"}, + "modifiedAt":{"shape":"Timestamp"} } }, "LoggingConfigurationStatus":{ @@ -885,19 +885,19 @@ "PutAlertManagerDefinitionRequest":{ "type":"structure", "required":[ - "data", - "workspaceId" + "workspaceId", + "data" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true - }, - "data":{"shape":"AlertManagerDefinitionData"}, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" + }, + "data":{"shape":"AlertManagerDefinitionData"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true } } }, @@ -911,38 +911,38 @@ "PutRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "data", + "workspaceId", "name", - "workspaceId" + "data" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "location":"uri", + "locationName":"workspaceId" }, - "data":{"shape":"RuleGroupsNamespaceData"}, "name":{ "shape":"RuleGroupsNamespaceName", "location":"uri", "locationName":"name" }, - "workspaceId":{ - "shape":"WorkspaceId", - "location":"uri", - "locationName":"workspaceId" + "data":{"shape":"RuleGroupsNamespaceData"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true } } }, "PutRuleGroupsNamespaceResponse":{ "type":"structure", "required":[ - "arn", "name", + 
"arn", "status" ], "members":{ - "arn":{"shape":"RuleGroupsNamespaceArn"}, "name":{"shape":"RuleGroupsNamespaceName"}, + "arn":{"shape":"RuleGroupsNamespaceArn"}, "status":{"shape":"RuleGroupsNamespaceStatus"}, "tags":{"shape":"TagMap"} } @@ -971,19 +971,19 @@ "type":"structure", "required":[ "arn", - "createdAt", - "data", - "modifiedAt", "name", - "status" + "status", + "data", + "createdAt", + "modifiedAt" ], "members":{ "arn":{"shape":"RuleGroupsNamespaceArn"}, - "createdAt":{"shape":"Timestamp"}, - "data":{"shape":"RuleGroupsNamespaceData"}, - "modifiedAt":{"shape":"Timestamp"}, "name":{"shape":"RuleGroupsNamespaceName"}, "status":{"shape":"RuleGroupsNamespaceStatus"}, + "data":{"shape":"RuleGroupsNamespaceData"}, + "createdAt":{"shape":"Timestamp"}, + "modifiedAt":{"shape":"Timestamp"}, "tags":{"shape":"TagMap"} } }, @@ -991,7 +991,7 @@ "type":"string", "max":64, "min":1, - "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + "pattern":".*[0-9A-Za-z][-.0-9A-Z_a-z]*.*" }, "RuleGroupsNamespaceStatus":{ "type":"structure", @@ -1016,17 +1016,17 @@ "type":"structure", "required":[ "arn", - "createdAt", - "modifiedAt", "name", - "status" + "status", + "createdAt", + "modifiedAt" ], "members":{ "arn":{"shape":"RuleGroupsNamespaceArn"}, - "createdAt":{"shape":"Timestamp"}, - "modifiedAt":{"shape":"Timestamp"}, "name":{"shape":"RuleGroupsNamespaceName"}, "status":{"shape":"RuleGroupsNamespaceStatus"}, + "createdAt":{"shape":"Timestamp"}, + "modifiedAt":{"shape":"Timestamp"}, "tags":{"shape":"TagMap"} } }, @@ -1038,17 +1038,17 @@ "type":"structure", "required":[ "message", - "quotaCode", "resourceId", "resourceType", - "serviceCode" + "serviceCode", + "quotaCode" ], "members":{ "message":{"shape":"String"}, - "quotaCode":{"shape":"String"}, "resourceId":{"shape":"String"}, "resourceType":{"shape":"String"}, - "serviceCode":{"shape":"String"} + "serviceCode":{"shape":"String"}, + "quotaCode":{"shape":"String"} }, "error":{ "httpStatusCode":402, @@ -1061,7 +1061,7 @@ 
"type":"string", "max":128, "min":1, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "TagKeys":{ "type":"list", @@ -1098,20 +1098,20 @@ "type":"string", "max":256, "min":0, - "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" }, "ThrottlingException":{ "type":"structure", "required":["message"], "members":{ "message":{"shape":"String"}, + "serviceCode":{"shape":"String"}, "quotaCode":{"shape":"String"}, "retryAfterSeconds":{ "shape":"Integer", "location":"header", "locationName":"Retry-After" - }, - "serviceCode":{"shape":"String"} + } }, "error":{ "httpStatusCode":429, @@ -1148,19 +1148,19 @@ "UpdateLoggingConfigurationRequest":{ "type":"structure", "required":[ - "logGroupArn", - "workspaceId" + "workspaceId", + "logGroupArn" ], "members":{ - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true - }, - "logGroupArn":{"shape":"LogGroupArn"}, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" + }, + "logGroupArn":{"shape":"LogGroupArn"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true } } }, @@ -1175,15 +1175,15 @@ "type":"structure", "required":["workspaceId"], "members":{ - "alias":{"shape":"WorkspaceAlias"}, - "clientToken":{ - "shape":"IdempotencyToken", - "idempotencyToken":true - }, "workspaceId":{ "shape":"WorkspaceId", "location":"uri", "locationName":"workspaceId" + }, + "alias":{"shape":"WorkspaceAlias"}, + "clientToken":{ + "shape":"IdempotencyToken", + "idempotencyToken":true } } }, @@ -1199,9 +1199,9 @@ "reason" ], "members":{ - "fieldList":{"shape":"ValidationExceptionFieldList"}, "message":{"shape":"String"}, - "reason":{"shape":"ValidationExceptionReason"} + "reason":{"shape":"ValidationExceptionReason"}, + "fieldList":{"shape":"ValidationExceptionFieldList"} }, "error":{ "httpStatusCode":400, @@ -1212,12 +1212,12 @@ "ValidationExceptionField":{ "type":"structure", 
"required":[ - "message", - "name" + "name", + "message" ], "members":{ - "message":{"shape":"String"}, - "name":{"shape":"String"} + "name":{"shape":"String"}, + "message":{"shape":"String"} } }, "ValidationExceptionFieldList":{ @@ -1242,26 +1242,26 @@ "WorkspaceDescription":{ "type":"structure", "required":[ + "workspaceId", "arn", - "createdAt", "status", - "workspaceId" + "createdAt" ], "members":{ + "workspaceId":{"shape":"WorkspaceId"}, "alias":{"shape":"WorkspaceAlias"}, "arn":{"shape":"WorkspaceArn"}, - "createdAt":{"shape":"Timestamp"}, - "prometheusEndpoint":{"shape":"Uri"}, "status":{"shape":"WorkspaceStatus"}, - "tags":{"shape":"TagMap"}, - "workspaceId":{"shape":"WorkspaceId"} + "prometheusEndpoint":{"shape":"Uri"}, + "createdAt":{"shape":"Timestamp"}, + "tags":{"shape":"TagMap"} } }, "WorkspaceId":{ "type":"string", "max":64, "min":1, - "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + "pattern":".*[0-9A-Za-z][-.0-9A-Z_a-z]*.*" }, "WorkspaceStatus":{ "type":"structure", @@ -1283,18 +1283,18 @@ "WorkspaceSummary":{ "type":"structure", "required":[ + "workspaceId", "arn", - "createdAt", "status", - "workspaceId" + "createdAt" ], "members":{ + "workspaceId":{"shape":"WorkspaceId"}, "alias":{"shape":"WorkspaceAlias"}, "arn":{"shape":"WorkspaceArn"}, - "createdAt":{"shape":"Timestamp"}, "status":{"shape":"WorkspaceStatus"}, - "tags":{"shape":"TagMap"}, - "workspaceId":{"shape":"WorkspaceId"} + "createdAt":{"shape":"Timestamp"}, + "tags":{"shape":"TagMap"} } }, "WorkspaceSummaryList":{ diff --git a/models/apis/amp/2020-08-01/docs-2.json b/models/apis/amp/2020-08-01/docs-2.json index 09149e2bae7..18773496c01 100644 --- a/models/apis/amp/2020-08-01/docs-2.json +++ b/models/apis/amp/2020-08-01/docs-2.json @@ -381,18 +381,18 @@ "ResourceNotFoundException$resourceType": "
Type of the resource affected.
", "RuleGroupsNamespaceStatus$statusReason": "The reason for failure if any.
", "ServiceQuotaExceededException$message": "Description of the error.
", - "ServiceQuotaExceededException$quotaCode": "Service Quotas requirement to identify originating quota.
", "ServiceQuotaExceededException$resourceId": "Identifier of the resource affected.
", "ServiceQuotaExceededException$resourceType": "Type of the resource affected.
", "ServiceQuotaExceededException$serviceCode": "Service Quotas requirement to identify originating service.
", + "ServiceQuotaExceededException$quotaCode": "Service Quotas requirement to identify originating quota.
", "TagResourceRequest$resourceArn": "The ARN of the resource.
", "ThrottlingException$message": "Description of the error.
", - "ThrottlingException$quotaCode": "Service Quotas requirement to identify originating quota.
", "ThrottlingException$serviceCode": "Service Quotas requirement to identify originating service.
", + "ThrottlingException$quotaCode": "Service Quotas requirement to identify originating quota.
", "UntagResourceRequest$resourceArn": "The ARN of the resource.
", "ValidationException$message": "Description of the error.
", - "ValidationExceptionField$message": "Message describing why the field failed validation.
", - "ValidationExceptionField$name": "The field name.
" + "ValidationExceptionField$name": "The field name.
", + "ValidationExceptionField$message": "Message describing why the field failed validation.
" } }, "TagKey": { diff --git a/models/apis/amp/2020-08-01/endpoint-rule-set-1.json b/models/apis/amp/2020-08-01/endpoint-rule-set-1.json new file mode 100644 index 00000000000..08690bafa0c --- /dev/null +++ b/models/apis/amp/2020-08-01/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": 
"endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aps-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aps-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": 
[], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aps.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://aps.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] + } + ] +} \ No newline at end of file diff --git a/models/apis/amp/2020-08-01/endpoint-tests-1.json b/models/apis/amp/2020-08-01/endpoint-tests-1.json new file mode 100644 index 00000000000..24b2a120ad6 --- /dev/null +++ b/models/apis/amp/2020-08-01/endpoint-tests-1.json @@ -0,0 +1,431 @@ +{ + "testCases": [ + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.ap-southeast-1.amazonaws.com" + } + }, + "params": { + 
"Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.ap-southeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.eu-north-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": 
"For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aps.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aps.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { 
+ "url": "https://aps.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://aps.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does 
not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://aps.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + 
"url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/apis/ecs/2014-11-13/api-2.json b/models/apis/ecs/2014-11-13/api-2.json index f1bdc1db3ee..b2e11b2e416 100644 --- a/models/apis/ecs/2014-11-13/api-2.json +++ b/models/apis/ecs/2014-11-13/api-2.json @@ -1290,7 +1290,8 @@ "healthCheck":{"shape":"HealthCheck"}, "systemControls":{"shape":"SystemControls"}, "resourceRequirements":{"shape":"ResourceRequirements"}, - "firelensConfiguration":{"shape":"FirelensConfiguration"} + "firelensConfiguration":{"shape":"FirelensConfiguration"}, + "credentialSpecs":{"shape":"StringList"} } }, "ContainerDefinitions":{ diff --git a/models/apis/ecs/2014-11-13/docs-2.json b/models/apis/ecs/2014-11-13/docs-2.json index 0d0064d8b6f..ad454835fdf 100644 --- a/models/apis/ecs/2014-11-13/docs-2.json +++ b/models/apis/ecs/2014-11-13/docs-2.json @@ -1030,7 +1030,7 @@ } }, "HealthCheck": { - "base": "An object representing a container health check. 
Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated or there's no container health check defined.
The following describes the possible healthStatus
values for a task. The container health check status of non-essential containers don't have an effect on the health status of a task.
HEALTHY
-All essential containers within the task have passed their health checks.
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-The essential containers within the task are still having their health checks evaluated, there are only nonessential containers with health checks defined, or there are no container health checks defined.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
An object representing a container health check. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image (such as those specified in a parent image or from the image's Dockerfile). This configuration maps to the HEALTHCHECK
parameter of docker run.
The Amazon ECS container agent only monitors and reports on the health checks specified in the task definition. Amazon ECS does not monitor Docker health checks that are embedded in a container image and not specified in the container definition. Health check parameters that are specified in a container definition override any Docker health checks that exist in the container image.
You can view the health status of both individual containers and a task with the DescribeTasks API operation or when viewing the task details in the console.
The health check is designed to make sure that your containers survive agent restarts, upgrades, or temporary unavailability.
The following describes the possible healthStatus
values for a container:
HEALTHY
-The container health check has passed successfully.
UNHEALTHY
-The container health check has failed.
UNKNOWN
-The container health check is being evaluated or there's no container health check defined.
The following describes the possible healthStatus
values for a task. The container health check status of non-essential containers don't have an effect on the health status of a task.
HEALTHY
-All essential containers within the task have passed their health checks.
UNHEALTHY
-One or more essential containers have failed their health check.
UNKNOWN
-The essential containers within the task are still having their health checks evaluated, there are only nonessential containers with health checks defined, or there are no container health checks defined.
If a task is run manually, and not as part of a service, the task will continue its lifecycle regardless of its health status. For tasks that are part of a service, if the task reports as unhealthy then the task will be stopped and the service scheduler will replace it.
The following are notes about container health check support:
When the Amazon ECS agent cannot connect to the Amazon ECS service, the service reports the container as UNHEALTHY
.
The health check statuses are the \"last heard from\" response from the Amazon ECS agent. There are no assumptions made about the status of the container health checks.
Container health checks require version 1.17.0 or greater of the Amazon ECS container agent. For more information, see Updating the Amazon ECS container agent.
Container health checks are supported for Fargate tasks if you're using platform version 1.1.0
or greater. For more information, see Fargate platform versions.
Container health checks aren't supported for tasks that are part of a service that's configured to use a Classic Load Balancer.
The container health check command and associated configuration parameters for the container. This parameter maps to HealthCheck
in the Create a container section of the Docker Remote API and the HEALTHCHECK
parameter of docker run.
A list of DNS servers that are presented to the container. This parameter maps to Dns
in the Create a container section of the Docker Remote API and the --dns
option to docker run.
This parameter is not supported for Windows containers.
A list of DNS search domains that are presented to the container. This parameter maps to DnsSearch
in the Create a container section of the Docker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see Docker Run Security Configuration. This field isn't valid for containers in tasks using the Fargate launch type.
For Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.
For any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers in the Amazon Elastic Container Service Developer Guide.
This parameter maps to SecurityOpt
in the Create a container section of the Docker Remote API and the --security-opt
option to docker run.
The Amazon ECS container agent running on a container instance must register with the ECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
environment variables before containers placed on that instance can use these security options. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.
For more information about valid values, see Docker Run Security Configuration.
Valid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"
", + "ContainerDefinition$credentialSpecs": "A list of ARNs in SSM or Amazon S3 to a credential spec (credspec
code>) file that configures a container for Active Directory authentication. This parameter is only used with domainless authentication.
The format for each ARN is credentialspecdomainless:MyARN
. Replace MyARN
with the ARN in SSM or Amazon S3.
The credspec
must provide an ARN in Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even if the tasks need to join different domains. For more information, see Using gMSAs for Windows Containers and Using gMSAs for Linux Containers.
The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.
", "CreateClusterRequest$capacityProviders": "The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the CreateService or RunTask actions.
If specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. New Auto Scaling group capacity providers can be created with the CreateCapacityProvider API operation.
To use a Fargate capacity provider, specify either the FARGATE
or FARGATE_SPOT
capacity providers. The Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.
The PutCapacityProvider API operation is used to update the list of available capacity providers for a cluster after the cluster is created.
", "DeleteTaskDefinitionsRequest$taskDefinitions": "The family
and revision
(family:revision
) or full Amazon Resource Name (ARN) of the task definition to delete. You must specify a revision
.
You can specify up to 10 task definitions as a comma separated list.
", diff --git a/models/apis/ivs/2020-07-14/api-2.json b/models/apis/ivs/2020-07-14/api-2.json index e07dcec88c3..6084f97b66b 100644 --- a/models/apis/ivs/2020-07-14/api-2.json +++ b/models/apis/ivs/2020-07-14/api-2.json @@ -135,7 +135,7 @@ "http":{ "method":"POST", "requestUri":"/DeleteRecordingConfiguration", - "responseCode":200 + "responseCode":204 }, "input":{"shape":"DeleteRecordingConfigurationRequest"}, "errors":[ @@ -378,7 +378,7 @@ "http":{ "method":"POST", "requestUri":"/PutMetadata", - "responseCode":200 + "responseCode":204 }, "input":{"shape":"PutMetadataRequest"}, "errors":[ diff --git a/models/apis/mediaconvert/2017-08-29/docs-2.json b/models/apis/mediaconvert/2017-08-29/docs-2.json index 16f78e644af..5a66630e911 100644 --- a/models/apis/mediaconvert/2017-08-29/docs-2.json +++ b/models/apis/mediaconvert/2017-08-29/docs-2.json @@ -1957,7 +1957,7 @@ "Hdr10Plus": { "base": "Setting for HDR10+ metadata insertion", "refs": { - "VideoPreprocessor$Hdr10Plus": "Enable HDR10+ analyis and metadata injection. Compatible with HEVC only." + "VideoPreprocessor$Hdr10Plus": "Enable HDR10+ analysis and metadata injection. Compatible with HEVC only." } }, "HlsAdMarkers": { @@ -2376,7 +2376,7 @@ "AudioSelector$LanguageCode": "Selects a specific language code from within an audio source.", "CaptionDescription$LanguageCode": "Specify the language of this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text.", "CaptionDescriptionPreset$LanguageCode": "Specify the language of this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. 
If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text.", - "CaptionSelector$LanguageCode": "The specific language to extract from source. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.", + "CaptionSelector$LanguageCode": "The specific language to extract from source. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.", "HlsCaptionLanguageMapping$LanguageCode": "Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.", "HlsRenditionGroupSettings$RenditionLanguageCode": "Optional. Specify ISO 639-2 or ISO 639-3 code in the language property", "WebvttHlsSourceSettings$RenditionLanguageCode": "Optional. Specify ISO 639-2 or ISO 639-3 code in the language property" @@ -2914,9 +2914,9 @@ } }, "MxfProfile": { - "base": "Specify the MXF profile, also called shim, for this output. When you choose Auto, MediaConvert chooses a profile based on the video codec and resolution. For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. 
For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html.", + "base": "Specify the MXF profile, also called shim, for this output. To automatically select a profile according to your output video codec and resolution, leave blank. For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html.", "refs": { - "MxfSettings$Profile": "Specify the MXF profile, also called shim, for this output. When you choose Auto, MediaConvert chooses a profile based on the video codec and resolution. For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html." + "MxfSettings$Profile": "Specify the MXF profile, also called shim, for this output. To automatically select a profile according to your output video codec and resolution, leave blank. For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html." } }, "MxfSettings": { @@ -3612,7 +3612,7 @@ "VideoCodec": { "base": "Type of video codec", "refs": { - "VideoCodecSettings$Codec": "Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. 
To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV." + "VideoCodecSettings$Codec": "Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV." } }, "VideoCodecSettings": { @@ -4470,7 +4470,7 @@ "DashIsoGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (DashIsoSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", "DvbSubDestinationSettings$Height": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match.", "DvbSubDestinationSettings$Width": "Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). 
All burn-in and DVB-Sub font settings must match.", - "DvbSubSourceSettings$Pid": "When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors.", + "DvbSubSourceSettings$Pid": "When using DVB-Sub with Burn-in, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors.", "FrameCaptureSettings$FramerateDenominator": "Frame capture will encode the first frame of the output stream, then one frame every framerateDenominator/framerateNumerator seconds. For example, settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per second) will capture the first frame, then 1 frame every 3s. Files will be named as filename.n.jpg where n is the 0-based sequence number of each Capture.", "FrameCaptureSettings$FramerateNumerator": "Frame capture will encode the first frame of the output stream, then one frame every framerateDenominator/framerateNumerator seconds. For example, settings of framerateNumerator = 1 and framerateDenominator = 3 (a rate of 1/3 frame per second) will capture the first frame, then 1 frame every 3s. Files will be named as filename.NNNNNNN.jpg where N is the 0-based frame sequence number zero padded to 7 decimal places.", "H264Settings$FramerateDenominator": "When you use the API for transcode jobs that use frame rate conversion, specify the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use FramerateDenominator to specify the denominator of this fraction. In this example, use 1001 for the value of FramerateDenominator. When you use the console for transcode jobs that use frame rate conversion, provide the value as a decimal number for Framerate. In this example, specify 23.976.", @@ -4482,7 +4482,7 @@ "H265Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. 
On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", "H265Settings$ParNumerator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parNumerator is 40.", "HlsGroupSettings$SegmentLength": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (HlsSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.", - "HlsGroupSettings$SegmentsPerSubdirectory": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.", + "HlsGroupSettings$SegmentsPerSubdirectory": "Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.", "Input$ProgramNumber": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. 
If the program you specify doesn't exist, the transcoding service will use this default.", "InputTemplate$ProgramNumber": "Use Program (programNumber) to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.", "Mpeg2Settings$ParDenominator": "Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the console, this corresponds to any value other than Follow source. When you specify an output pixel aspect ratio (PAR) that is different from your input video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC widescreen, you would specify the ratio 40:33. In this example, the value for parDenominator is 33.", @@ -5374,7 +5374,7 @@ "base": null, "refs": { "AudioSelector$CustomLanguageCode": "Selects a specific language code from within an audio source, using the ISO 639-2 or ISO 639-3 three-letter language code", - "CaptionSelector$CustomLanguageCode": "The specific language to extract from source, using the ISO 639-2 or ISO 639-3 three-letter language code. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in or SMPTE-TT, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.", + "CaptionSelector$CustomLanguageCode": "The specific language to extract from source, using the ISO 639-2 or ISO 639-3 three-letter language code. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in, complete this field and/or PID to select the caption language to extract. 
If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.", "HlsCaptionLanguageMapping$CustomLanguageCode": "Specify the language for this captions channel, using the ISO 639-2 or ISO 639-3 three-letter language code" } }, diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index 064d7f1a229..520a12797e1 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -7633,10 +7633,10 @@ }, "DeploymentConfig":{ "type":"structure", - "required":["BlueGreenUpdatePolicy"], "members":{ "BlueGreenUpdatePolicy":{"shape":"BlueGreenUpdatePolicy"}, - "AutoRollbackConfiguration":{"shape":"AutoRollbackConfig"} + "AutoRollbackConfiguration":{"shape":"AutoRollbackConfig"}, + "RollingUpdatePolicy":{"shape":"RollingUpdatePolicy"} } }, "DeploymentRecommendation":{ @@ -10237,7 +10237,8 @@ "RollingBack", "InService", "Deleting", - "Failed" + "Failed", + "UpdateRollbackFailed" ] }, "EndpointSummary":{ @@ -14403,7 +14404,7 @@ }, "MaximumExecutionTimeoutInSeconds":{ "type":"integer", - "max":14400, + "max":28800, "min":600 }, "MaximumRetryAttempts":{ @@ -18170,6 +18171,19 @@ "min":20, "pattern":"^arn:aws[a-z\\-]*:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+$" }, + "RollingUpdatePolicy":{ + "type":"structure", + "required":[ + "MaximumBatchSize", + "WaitIntervalInSeconds" + ], + "members":{ + "MaximumBatchSize":{"shape":"CapacitySize"}, + "WaitIntervalInSeconds":{"shape":"WaitIntervalInSeconds"}, + "MaximumExecutionTimeoutInSeconds":{"shape":"MaximumExecutionTimeoutInSeconds"}, + "RollbackMaximumBatchSize":{"shape":"CapacitySize"} + } + }, "RootAccess":{ "type":"string", "enum":[ diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index 3398287bb96..97167fa526c 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ 
b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -1551,8 +1551,10 @@ } }, "CapacitySize": { - "base": "Specifies the endpoint capacity to activate for production.
", + "base": "Specifies the type and size of the endpoint capacity to activate for a blue/green deployment, a rolling deployment, or a rollback strategy. You can specify your batches as either instance count or the overall percentage or your fleet.
For a rollback strategy, if you don't specify the fields in this object, or if you set the Value
to 100%, then SageMaker uses a blue/green rollback strategy and rolls all traffic back to the blue fleet.
Batch size for each rolling step to provision capacity and turn on traffic on the new endpoint fleet, and terminate capacity on the old endpoint fleet. Value must be between 5% to 50% of the variant's total instance count.
", + "RollingUpdatePolicy$RollbackMaximumBatchSize": "Batch size for rollback to the old endpoint fleet. Each rolling step to provision capacity and turn on traffic on the old endpoint fleet, and terminate capacity on the new endpoint fleet. If this field is absent, the default value will be set to 100% of total capacity which means to bring up the whole capacity of the old fleet at once during rollback.
", "TrafficRoutingConfig$CanarySize": "Batch size for the first step to turn on traffic on the new endpoint fleet. Value
must be less than or equal to 50% of the variant's total instance count.
Batch size for each step to turn on traffic on the new endpoint fleet. Value
must be 10-50% of the variant's total instance count.
Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in TerminationWaitInSeconds
and WaitIntervalInSeconds
.
Maximum execution timeout for the deployment. Note that the timeout value should be larger than the total waiting time specified in TerminationWaitInSeconds
and WaitIntervalInSeconds
.
The time limit for the total deployment. Exceeding this limit causes a timeout.
" } }, "MaximumRetryAttempts": { @@ -11891,6 +11894,12 @@ "UserSettings$ExecutionRole": "The execution role for the user.
" } }, + "RollingUpdatePolicy": { + "base": "Specifies a rolling deployment strategy for updating a SageMaker endpoint.
", + "refs": { + "DeploymentConfig$RollingUpdatePolicy": "Specifies a rolling deployment strategy for updating a SageMaker endpoint.
" + } + }, "RootAccess": { "base": null, "refs": { @@ -14998,6 +15007,7 @@ "WaitIntervalInSeconds": { "base": null, "refs": { + "RollingUpdatePolicy$WaitIntervalInSeconds": "The length of the baking period, during which SageMaker monitors alarms for each batch on the new fleet.
", "TrafficRoutingConfig$WaitIntervalInSeconds": "The waiting time (in seconds) between incremental steps to turn on traffic on the new endpoint fleet.
" } }, diff --git a/models/apis/transfer/2018-11-05/api-2.json b/models/apis/transfer/2018-11-05/api-2.json index cdb392f07b0..2f696f014cc 100644 --- a/models/apis/transfer/2018-11-05/api-2.json +++ b/models/apis/transfer/2018-11-05/api-2.json @@ -961,9 +961,15 @@ "EncryptionAlgorithm":{"shape":"EncryptionAlg"}, "SigningAlgorithm":{"shape":"SigningAlg"}, "MdnSigningAlgorithm":{"shape":"MdnSigningAlg"}, - "MdnResponse":{"shape":"MdnResponse"} + "MdnResponse":{"shape":"MdnResponse"}, + "BasicAuthSecretId":{"shape":"As2ConnectorSecretId"} } }, + "As2ConnectorSecretId":{ + "type":"string", + "max":2048, + "min":0 + }, "As2Id":{ "type":"string", "max":128, diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 1fec5b7dc4d..747d0c57fa8 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -134,6 +134,12 @@ "UpdateConnectorRequest$As2Config": "A structure that contains the parameters for a connector object.
" } }, + "As2ConnectorSecretId": { + "base": null, + "refs": { + "As2ConnectorConfig$BasicAuthSecretId": "Provides Basic authentication support to the AS2 Connectors API. To use Basic authentication, you must provide the name or Amazon Resource Name (ARN) of a secret in Secrets Manager.
The default value for this parameter is null
, which indicates that Basic authentication is not enabled for the connector.
If the connector should use Basic authentication, the secret needs to be in the following format:
{ \"Username\": \"user-name\", \"Password\": \"user-password\" }
Replace user-name
and user-password
with the credentials for the actual user that is being authenticated.
Note the following:
You are storing these credentials in Secrets Manager, not passing them directly into this API.
If you are using the API, SDKs, or CloudFormation to configure your connector, then you must create the secret before you can enable Basic authentication. However, if you are using the Amazon Web Services management console, you can have the system create the secret for you.
If you have previously enabled Basic authentication for a connector, you can disable it by using the UpdateConnector
API call. For example, if you are using the CLI, you can run the following command to remove Basic authentication:
update-connector --connector-id my-connector-id --as2-config 'BasicAuthSecretId=\"\"'
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", - "CreateAgreementRequest$AccessRole": "With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue
permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt
permission for that key.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue
permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt
permission for that key.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
", "CreateServerRequest$LoggingRole": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs.
", "CreateUserRequest$Role": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", "DescribedAccess$Role": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", - "DescribedAgreement$AccessRole": "With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue
permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt
permission for that key.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue
permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt
permission for that key.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
", "DescribedExecution$ExecutionRole": "The IAM role associated with the execution.
", "DescribedServer$LoggingRole": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs.
", @@ -1482,8 +1488,8 @@ "ListedUser$Role": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
The IAM role that controls your users' access to your Amazon S3 bucket for servers with Domain=S3
, or your EFS file system for servers with Domain=EFS
.
The policies attached to this role determine the level of access you want to provide your users when transferring files into and out of your S3 buckets or EFS file systems.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs.
", "UpdateAccessRequest$Role": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", - "UpdateAgreementRequest$AccessRole": "With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue
permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt
permission for that key.
With AS2, you can send files by calling StartFileTransfer
and specifying the file paths in the request parameter, SendFilePaths
. We use the file’s parent directory (for example, for --send-file-paths /bucket/dir/file.txt
, parent directory is /bucket/dir/
) to temporarily store a processed AS2 message file, store the MDN when we receive them from the partner, and write a final JSON file containing relevant metadata of the transmission. So, the AccessRole
needs to provide read and write access to the parent directory of the file location used in the StartFileTransfer
request. Additionally, you need to provide read and write access to the parent directory of the files that you intend to send with StartFileTransfer
.
If you are using Basic authentication for your AS2 connector, the access role requires the secretsmanager:GetSecretValue
permission for the secret. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key in Secrets Manager, then the role also needs the kms:Decrypt
permission for that key.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that allows a connector to turn on CloudWatch logging for Amazon S3 events. When set, you can view connector activity in your CloudWatch logs.
", "UpdateUserRequest$Role": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that controls your users' access to your Amazon S3 bucket or Amazon EFS file system. The policies attached to this role determine the level of access that you want to provide your users when transferring files into and out of your Amazon S3 bucket or Amazon EFS file system. The IAM role should also contain a trust relationship that allows the server to access your resources when servicing your users' transfer requests.
", "WorkflowDetail$ExecutionRole": "Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources
" diff --git a/models/apis/verifiedpermissions/2021-12-01/docs-2.json b/models/apis/verifiedpermissions/2021-12-01/docs-2.json index 9d52f3a2e6e..37a95cb860b 100644 --- a/models/apis/verifiedpermissions/2021-12-01/docs-2.json +++ b/models/apis/verifiedpermissions/2021-12-01/docs-2.json @@ -296,7 +296,7 @@ } }, "EvaluationErrorItem": { - "base": "Contains a description of an evaluation error.
This data type is used as a request parameter in the IsAuthorized and IsAuthorizedWithToken operations.
", + "base": "Contains a description of an evaluation error.
This data type is used as a request parameter in the IsAuthorized and IsAuthorizedWithToken operations.
", "refs": { "EvaluationErrorList$member": null } @@ -361,10 +361,10 @@ "IdempotencyToken": { "base": null, "refs": { - "CreateIdentitySourceInput$clientToken": "Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.
Specifies a unique, case-sensitive ID that you provide to ensure the idempotency of the request. This lets you safely retry the request without accidentally performing the same operation a second time. Passing the same value to a later call to an operation requires that you also pass the same value for all other parameters. We recommend that you use a UUID type of value..
If you don't provide this value, then Amazon Web Services generates a random one for you.
If you retry the operation with the same ClientToken
, but with different parameters, the retry fails with an IdempotentParameterMismatch
error.