diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a33eb13e5e..2d3cc49d59b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +Release v1.50.5 (2024-01-26) +=== + +### Service Client Updates +* `service/connect`: Updates service API +* `service/inspector2`: Updates service API and documentation +* `service/sagemaker`: Updates service API and documentation + * Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs. + Release v1.50.4 (2024-01-25) === diff --git a/aws/version.go b/aws/version.go index 7269acfb359..52027115d33 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.4" +const SDKVersion = "1.50.5" diff --git a/models/apis/connect/2017-08-08/api-2.json b/models/apis/connect/2017-08-08/api-2.json index fcf379598ef..3752722d620 100644 --- a/models/apis/connect/2017-08-08/api-2.json +++ b/models/apis/connect/2017-08-08/api-2.json @@ -11460,7 +11460,7 @@ }, "PredefinedAttributeName":{ "type":"string", - "max":128, + "max":64, "min":1 }, "PredefinedAttributeSearchConditionList":{ @@ -11481,13 +11481,13 @@ }, "PredefinedAttributeStringValue":{ "type":"string", - "max":128, + "max":64, "min":1 }, "PredefinedAttributeStringValuesList":{ "type":"list", "member":{"shape":"PredefinedAttributeStringValue"}, - "max":75, + "max":128, "min":1 }, "PredefinedAttributeSummary":{ diff --git a/models/apis/inspector2/2020-06-08/api-2.json b/models/apis/inspector2/2020-06-08/api-2.json index adc13b478b7..dd31548c61c 100644 --- a/models/apis/inspector2/2020-06-08/api-2.json +++ b/models/apis/inspector2/2020-06-08/api-2.json @@ -1894,7 +1894,7 @@ "CisSessionMessages":{ "type":"list", "member":{"shape":"CisSessionMessage"}, - "max":50, + "max":150, "min":1 }, "CisSortOrder":{ @@ -2173,6 +2173,7 @@ "ec2InstanceTags":{"shape":"CoverageMapFilterList"}, "ecrImageTags":{"shape":"CoverageStringFilterList"}, "ecrRepositoryName":{"shape":"CoverageStringFilterList"}, + "imagePulledAt":{"shape":"CoverageDateFilterList"}, "lambdaFunctionName":{"shape":"CoverageStringFilterList"}, "lambdaFunctionRuntime":{"shape":"CoverageStringFilterList"}, "lambdaFunctionTags":{"shape":"CoverageMapFilterList"}, @@ -2698,6 +2699,7 @@ "type":"structure", "required":["rescanDuration"], "members":{ + "pullDateRescanDuration":{"shape":"EcrPullDateRescanDuration"}, "rescanDuration":{"shape":"EcrRescanDuration"} } }, @@ -2710,9 +2712,20 @@ "EcrContainerImageMetadata":{ "type":"structure", "members":{ + "imagePulledAt":{"shape":"DateTimeTimestamp"}, "tags":{"shape":"TagList"} } }, + "EcrPullDateRescanDuration":{ + "type":"string", + "enum":[ + "DAYS_14", + "DAYS_30", + "DAYS_60", + "DAYS_90", + "DAYS_180" + ] + }, "EcrRepositoryMetadata":{ "type":"structure", "members":{ @@ -2725,12 +2738,16 @@ "enum":[ "LIFETIME", "DAYS_30", - "DAYS_180" + "DAYS_180", + "DAYS_14", + "DAYS_60", + "DAYS_90" ] }, "EcrRescanDurationState":{ "type":"structure", "members":{ + "pullDateRescanDuration":{"shape":"EcrPullDateRescanDuration"}, "rescanDuration":{"shape":"EcrRescanDuration"}, "status":{"shape":"EcrRescanDurationStatus"}, "updatedAt":{"shape":"DateTimeTimestamp"} diff --git a/models/apis/inspector2/2020-06-08/docs-2.json b/models/apis/inspector2/2020-06-08/docs-2.json index d48b2741ef3..c0326181bd3 100644 --- a/models/apis/inspector2/2020-06-08/docs-2.json +++ b/models/apis/inspector2/2020-06-08/docs-2.json @@ -57,7 +57,7 @@ "UpdateCisScanConfiguration": "
Updates a CIS scan configuration.
", "UpdateConfiguration": "Updates setting configurations for your Amazon Inspector account. When you use this API as an Amazon Inspector delegated administrator this updates the setting for all accounts you manage. Member accounts in an organization cannot update this setting.
", "UpdateEc2DeepInspectionConfiguration": "Activates, deactivates Amazon Inspector deep inspection, or updates custom paths for your account.
", - "UpdateEncryptionKey": "Updates an encryption key. A ResourceNotFoundException
means that an AWS owned key is being used for encryption.
Updates an encryption key. A ResourceNotFoundException
means that an Amazon Web Services owned key is being used for encryption.
Specifies the action that is to be applied to the findings that match the filter.
", "UpdateOrgEc2DeepInspectionConfiguration": "Updates the Amazon Inspector deep inspection custom paths for your organization. You must be an Amazon Inspector delegated administrator to use this API.
", "UpdateOrganizationConfiguration": "Updates the configurations for your Amazon Inspector organization.
" @@ -117,8 +117,8 @@ "GetCisScanResultDetailsRequest$accountId": "The account ID.
", "GetMemberRequest$accountId": "The Amazon Web Services account ID of the member account to retrieve information on.
", "ImageLayerAggregationResponse$accountId": "The ID of the Amazon Web Services account that owns the container image hosting the layer image.
", - "LambdaFunctionAggregationResponse$accountId": "The ID of the AWS account that owns the AWS Lambda function.
", - "LambdaLayerAggregationResponse$accountId": "The account ID of the AWS Lambda function layer.
", + "LambdaFunctionAggregationResponse$accountId": "The ID of the Amazon Web Services account that owns the Amazon Web Services Lambda function.
", + "LambdaLayerAggregationResponse$accountId": "The account ID of the Amazon Web Services Lambda function layer.
", "Member$accountId": "The Amazon Web Services account ID of the member account.
", "Member$delegatedAdminAccountId": "The Amazon Web Services account ID of the Amazon Inspector delegated administrator for this member account.
", "MemberAccountEc2DeepInspectionStatus$accountId": "The unique identifier for the Amazon Web Services account of the organization member.
", @@ -252,7 +252,7 @@ "ArchitectureList": { "base": null, "refs": { - "AwsLambdaFunctionDetails$architectures": "The instruction set architecture that the AWS Lambda function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64
.",
+      "AwsLambdaFunctionDetails$architectures": "The instruction set architecture that the Amazon Web Services Lambda function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.",
  "AwsLambdaFunctionDetails": {
-    "base": "A summary of information about the AWS Lambda function.
", + "base": "A summary of information about the Amazon Web Services Lambda function.
", "refs": { - "ResourceDetails$awsLambdaFunction": "A summary of the information about an AWS Lambda function affected by a finding.
" + "ResourceDetails$awsLambdaFunction": "A summary of the information about an Amazon Web Services Lambda function affected by a finding.
" } }, "BadRequestException": { @@ -417,8 +417,8 @@ "refs": { "AutoEnable$ec2": "Represents whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector organization.
", "AutoEnable$ecr": "Represents whether Amazon ECR scans are automatically enabled for new members of your Amazon Inspector organization.
", - "AutoEnable$lambda": "Represents whether AWS Lambda standard scans are automatically enabled for new members of your Amazon Inspector organization.
", - "AutoEnable$lambdaCode": "Represents whether AWS Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.
",
+ "AutoEnable$lambda": "Represents whether Amazon Web Services Lambda standard scans are automatically enabled for new members of your Amazon Inspector organization.
", + "AutoEnable$lambdaCode": "Represents whether Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.
",
"DescribeOrganizationConfigurationResponse$maxAccountLimitReached": "Represents whether your organization has reached the maximum Amazon Web Services account limit for Amazon Inspector.
", "ListMembersRequest$onlyAssociated": "Specifies whether to list only currently associated members if True
or to list all members within the organization if False
.
Whether Amazon Inspector deep inspection is active in the account. If TRUE
Amazon Inspector deep inspection is active, if FALSE
it is not active.
The date an image was last pulled at.
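A hedged aws-sdk-go sketch of the new imagePulledAt coverage filter added above; the 14-day window is an arbitrary example, not something this diff prescribes:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/inspector2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := inspector2.New(sess)

	// List resources whose ECR image was pulled within the last 14 days.
	out, err := svc.ListCoverage(&inspector2.ListCoverageInput{
		FilterCriteria: &inspector2.CoverageFilterCriteria{
			ImagePulledAt: []*inspector2.CoverageDateFilter{{
				StartInclusive: aws.Time(time.Now().AddDate(0, 0, -14)),
				EndInclusive:   aws.Time(time.Now()),
			}},
		},
	})
	if err != nil {
		log.Fatalf("ListCoverage: %v", err)
	}
	for _, r := range out.CoveredResources {
		fmt.Println(aws.StringValue(r.ResourceId))
	}
}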
", "CoverageFilterCriteria$lastScannedAt": "Filters Amazon Web Services resources based on whether Amazon Inspector has checked them for vulnerabilities within the specified time range.
" } }, @@ -1061,7 +1062,7 @@ "base": null, "refs": { "CoverageFilterCriteria$ec2InstanceTags": "The Amazon EC2 instance tags to filter on.
", - "CoverageFilterCriteria$lambdaFunctionTags": "Returns coverage statistics for AWS Lambda functions filtered by tag.
" + "CoverageFilterCriteria$lambdaFunctionTags": "Returns coverage statistics for Amazon Web Services Lambda functions filtered by tag.
" } }, "CoverageResourceType": { @@ -1088,8 +1089,8 @@ "CoverageFilterCriteria$accountId": "An array of Amazon Web Services account IDs to return coverage statistics for.
", "CoverageFilterCriteria$ecrImageTags": "The Amazon ECR image tags to filter on.
", "CoverageFilterCriteria$ecrRepositoryName": "The Amazon ECR repository name to filter on.
", - "CoverageFilterCriteria$lambdaFunctionName": "Returns coverage statistics for AWS Lambda functions filtered by function names.
", - "CoverageFilterCriteria$lambdaFunctionRuntime": "Returns coverage statistics for AWS Lambda functions filtered by runtime.
", + "CoverageFilterCriteria$lambdaFunctionName": "Returns coverage statistics for Amazon Web Services Lambda functions filtered by function names.
", + "CoverageFilterCriteria$lambdaFunctionRuntime": "Returns coverage statistics for Amazon Web Services Lambda functions filtered by runtime.
", "CoverageFilterCriteria$resourceId": "An array of Amazon Web Services resource IDs to return coverage statistics for.
", "CoverageFilterCriteria$resourceType": "An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE
, AWS_LAMBDA_FUNCTION, AWS_ECR_CONTAINER_IMAGE, AWS_ECR_REPOSITORY or AWS_ACCOUNT.",
      "CoverageFilterCriteria$scanStatusCode": "The scan status code to filter on. Valid values are: ValidationException, InternalServerException, ResourceNotFoundException, BadRequestException, and ThrottlingException.",
      "FilterCriteria$ecrImagePushedAt": "Details on the Amazon ECR image push date and time used to filter findings.
", "FilterCriteria$firstObservedAt": "Details on the date and time a finding was first seen used to filter findings.
", - "FilterCriteria$lambdaFunctionLastModifiedAt": "Filters the list of AWS Lambda functions by the date and time that a user last updated the configuration, in ISO 8601 format
", + "FilterCriteria$lambdaFunctionLastModifiedAt": "Filters the list of Amazon Web Services Lambda functions by the date and time that a user last updated the configuration, in ISO 8601 format
", "FilterCriteria$lastObservedAt": "Details on the date and time a finding was last seen used to filter findings.
", "FilterCriteria$updatedAt": "Details on the date and time a finding was last updated at used to filter findings.
" } @@ -1282,6 +1283,7 @@ "CoverageDateFilter$endInclusive": "A timestamp representing the end of the time period to filter results by.
", "CoverageDateFilter$startInclusive": "A timestamp representing the start of the time period to filter results by.
", "CoveredResource$lastScannedAt": "The date and time the resource was last checked for vulnerabilities.
", + "EcrContainerImageMetadata$imagePulledAt": "The date an image was last pulled at.
", "EcrRescanDurationState$updatedAt": "A timestamp representing when the last time the ECR scan duration setting was changed.
", "ExploitabilityDetails$lastKnownExploitAt": "The date and time of the last exploit associated with a finding discovered in your environment.
", "Filter$createdAt": "The date and time this filter was created at.
", @@ -1289,7 +1291,7 @@ "Finding$firstObservedAt": "The date and time that the finding was first observed.
", "Finding$lastObservedAt": "The date and time that the finding was last observed.
", "Finding$updatedAt": "The date and time the finding was last updated at.
", - "LambdaFunctionAggregationResponse$lastModifiedAt": "The date that the AWS Lambda function included in the aggregation results was last changed.
", + "LambdaFunctionAggregationResponse$lastModifiedAt": "The date that the Amazon Web Services Lambda function included in the aggregation results was last changed.
", "Member$updatedAt": "A timestamp showing when the status of this member was last updated.
", "PackageVulnerabilityDetails$vendorCreatedAt": "The date and time that this vulnerability was first added to the vendor's database.
", "PackageVulnerabilityDetails$vendorUpdatedAt": "The date and time the vendor last updated this vulnerability in their database.
" @@ -1486,6 +1488,13 @@ "ResourceScanMetadata$ecrImage": "An object that contains details about the container metadata for an Amazon ECR image.
" } }, + "EcrPullDateRescanDuration": { + "base": null, + "refs": { + "EcrConfiguration$pullDateRescanDuration": "The rescan duration configured for image pull date.
", + "EcrRescanDurationState$pullDateRescanDuration": "The rescan duration configured for image pull date.
" + } + }, "EcrRepositoryMetadata": { "base": "Information on the Amazon ECR repository metadata associated with a finding.
", "refs": { @@ -1495,14 +1504,14 @@ "EcrRescanDuration": { "base": null, "refs": { - "EcrConfiguration$rescanDuration": "The ECR automated re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the automated re-scan duration the monitoring state of that image becomes inactive
and all associated findings are scheduled for closure.",
-      "EcrRescanDurationState$rescanDuration": "The ECR automated re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the automated re-scan duration the monitoring state of that image becomes inactive and all associated findings are scheduled for closure."
+      "EcrConfiguration$rescanDuration": "The rescan duration configured for image push date.
", + "EcrRescanDurationState$rescanDuration": "The rescan duration configured for image push date.
"
}
},
"EcrRescanDurationState": {
- "base": "Details about the state of any changes to the ECR automated re-scan duration setting.
", + "base": "Details about the state of your ECR re-scan duration settings. The ECR re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the duration configured for image pull date, and the duration configured for image pull date, the monitoring state of that image becomes inactive
and all associated findings are scheduled for closure.
An object that contains details about the state of the ECR automated re-scan setting.
" + "EcrConfigurationState$rescanDurationState": "An object that contains details about the state of the ECR re-scan settings.
" } }, "EcrRescanDurationStatus": { @@ -1613,7 +1622,7 @@ "ExecutionRoleArn": { "base": null, "refs": { - "AwsLambdaFunctionDetails$executionRoleArn": "The AWS Lambda function's execution role.
" + "AwsLambdaFunctionDetails$executionRoleArn": "The Amazon Web Services Lambda function's execution role.
" } }, "ExploitAvailable": { @@ -1921,7 +1930,7 @@ "FunctionName": { "base": null, "refs": { - "AwsLambdaFunctionDetails$functionName": "The name of the AWS Lambda function.
" + "AwsLambdaFunctionDetails$functionName": "The name of the Amazon Web Services Lambda function.
" } }, "GetCisScanReportRequest": { @@ -2119,21 +2128,21 @@ } }, "LambdaFunctionAggregation": { - "base": "The details that define a findings aggregation based on AWS Lambda functions.
", + "base": "The details that define a findings aggregation based on Amazon Web Services Lambda functions.
", "refs": { - "AggregationRequest$lambdaFunctionAggregation": "Returns an object with findings aggregated by AWS Lambda function.
" + "AggregationRequest$lambdaFunctionAggregation": "Returns an object with findings aggregated by Amazon Web Services Lambda function.
" } }, "LambdaFunctionAggregationResponse": { - "base": "A response that contains the results of an AWS Lambda function finding aggregation.
", + "base": "A response that contains the results of an Amazon Web Services Lambda function finding aggregation.
", "refs": { - "AggregationResponse$lambdaFunctionAggregation": "An aggregation of findings by AWS Lambda function.
" + "AggregationResponse$lambdaFunctionAggregation": "An aggregation of findings by Amazon Web Services Lambda function.
" } }, "LambdaFunctionMetadata": { - "base": "The AWS Lambda function metadata.
", + "base": "The Amazon Web Services Lambda function metadata.
", "refs": { - "ResourceScanMetadata$lambdaFunction": "An object that contains metadata details for an AWS Lambda function.
" + "ResourceScanMetadata$lambdaFunction": "An object that contains metadata details for an Amazon Web Services Lambda function.
" } }, "LambdaFunctionSortBy": { @@ -2143,15 +2152,15 @@ } }, "LambdaLayerAggregation": { - "base": "The details that define a findings aggregation based on an AWS Lambda function's layers.
", + "base": "The details that define a findings aggregation based on an Amazon Web Services Lambda function's layers.
", "refs": { - "AggregationRequest$lambdaLayerAggregation": "Returns an object with findings aggregated by AWS Lambda layer.
" + "AggregationRequest$lambdaLayerAggregation": "Returns an object with findings aggregated by Amazon Web Services Lambda layer.
" } }, "LambdaLayerAggregationResponse": { - "base": "A response that contains the results of an AWS Lambda function layer finding aggregation.
", + "base": "A response that contains the results of an Amazon Web Services Lambda function layer finding aggregation.
", "refs": { - "AggregationResponse$lambdaLayerAggregation": "An aggregation of findings by AWS Lambda layer.
" + "AggregationResponse$lambdaLayerAggregation": "An aggregation of findings by Amazon Web Services Lambda layer.
" } }, "LambdaLayerArn": { @@ -2159,13 +2168,13 @@ "refs": { "CodeVulnerabilityDetails$sourceLambdaLayerArn": "The Amazon Resource Name (ARN) of the Lambda layer that the code vulnerability was detected in.
", "LayerList$member": null, - "VulnerablePackage$sourceLambdaLayerArn": "The Amazon Resource Number (ARN) of the AWS Lambda function affected by a finding.
" + "VulnerablePackage$sourceLambdaLayerArn": "The Amazon Resource Number (ARN) of the Amazon Web Services Lambda function affected by a finding.
" } }, "LambdaLayerList": { "base": null, "refs": { - "LambdaFunctionMetadata$layers": "The layers for an AWS Lambda function. A Lambda function can have up to five layers.
" + "LambdaFunctionMetadata$layers": "The layers for an Amazon Web Services Lambda function. A Lambda function can have up to five layers.
" } }, "LambdaLayerSortBy": { @@ -2175,9 +2184,9 @@ } }, "LambdaVpcConfig": { - "base": "The VPC security groups and subnets that are attached to an AWS Lambda function. For more information, see VPC Settings.
", + "base": "The VPC security groups and subnets that are attached to an Amazon Web Services Lambda function. For more information, see VPC Settings.
", "refs": { - "AwsLambdaFunctionDetails$vpcConfig": "The AWS Lambda function's networking configuration.
" + "AwsLambdaFunctionDetails$vpcConfig": "The Amazon Web Services Lambda function's networking configuration.
" } }, "LastSeen": { @@ -2190,7 +2199,7 @@ "LayerList": { "base": null, "refs": { - "AwsLambdaFunctionDetails$layers": "The AWS Lambda function's layers. A Lambda function can have up to five layers.
" + "AwsLambdaFunctionDetails$layers": "The Amazon Web Services Lambda function's layers. A Lambda function can have up to five layers.
" } }, "ListAccountPermissionsMaxResults": { @@ -2596,7 +2605,7 @@ "AwsEcrContainerImageDetails$architecture": "The architecture of the Amazon ECR container image.
", "AwsEcrContainerImageDetails$registry": "The registry for the Amazon ECR container image.
", "AwsEcrContainerImageDetails$repositoryName": "The name of the repository the Amazon ECR container image resides in.
", - "AwsLambdaFunctionDetails$codeSha256": "The SHA256 hash of the AWS Lambda function's deployment package.
", + "AwsLambdaFunctionDetails$codeSha256": "The SHA256 hash of the Amazon Web Services Lambda function's deployment package.
", "CodeFilePath$fileName": "The name of the file the code vulnerability was found in.
", "CodeFilePath$filePath": "The file path to the code that a vulnerability was found in.
", "CodeSnippetError$errorMessage": "The error message received when Amazon Inspector failed to retrieve a code snippet.
", @@ -2628,9 +2637,9 @@ "ImageLayerAggregationResponse$resourceId": "The resource ID of the container image layer.
", "ImageTagList$member": null, "LambdaFunctionAggregationResponse$resourceId": "The resource IDs included in the aggregation results.
", - "LambdaLayerAggregationResponse$functionName": "The names of the AWS Lambda functions associated with the layers.
", - "LambdaLayerAggregationResponse$layerArn": "The Amazon Resource Name (ARN) of the AWS Lambda function layer.
", - "LambdaLayerAggregationResponse$resourceId": "The Resource ID of the AWS Lambda function layer.
", + "LambdaLayerAggregationResponse$functionName": "The names of the Amazon Web Services Lambda functions associated with the layers.
", + "LambdaLayerAggregationResponse$layerArn": "The Amazon Resource Name (ARN) of the Amazon Web Services Lambda function layer.
", + "LambdaLayerAggregationResponse$resourceId": "The Resource ID of the Amazon Web Services Lambda function layer.
", "MemberAccountEc2DeepInspectionStatusState$errorMessage": "The error message explaining why the account failed to activate Amazon Inspector deep inspection.
", "NonEmptyStringList$member": null, "PackageAggregationResponse$packageName": "The name of the operating system package.
", @@ -3023,7 +3032,7 @@ "base": null, "refs": { "ResourceFilterCriteria$ec2InstanceTags": "The EC2 instance tags used as resource filter criteria.
", - "ResourceFilterCriteria$lambdaFunctionTags": "The AWS Lambda function tags used as resource filter criteria.
" + "ResourceFilterCriteria$lambdaFunctionTags": "The Amazon Web Services Lambda function tags used as resource filter criteria.
" } }, "ResourceNotFoundException": { @@ -3075,7 +3084,7 @@ "ResourceFilterCriteria$accountId": "The account IDs used as resource filter criteria.
", "ResourceFilterCriteria$ecrImageTags": "The ECR image tags used as resource filter criteria.
", "ResourceFilterCriteria$ecrRepositoryName": "The ECR repository names used as resource filter criteria.
", - "ResourceFilterCriteria$lambdaFunctionName": "The AWS Lambda function name used as resource filter criteria.
", + "ResourceFilterCriteria$lambdaFunctionName": "The Amazon Web Services Lambda function name used as resource filter criteria.
", "ResourceFilterCriteria$resourceId": "The resource IDs used as resource filter criteria.
", "ResourceFilterCriteria$resourceType": "The resource types used as resource filter criteria.
" } @@ -3118,8 +3127,8 @@ "Runtime": { "base": null, "refs": { - "AwsLambdaFunctionDetails$runtime": "The runtime environment for the AWS Lambda function.
", - "LambdaFunctionMetadata$runtime": "An AWS Lambda function's runtime.
" + "AwsLambdaFunctionDetails$runtime": "The runtime environment for the Amazon Web Services Lambda function.
", + "LambdaFunctionMetadata$runtime": "An Amazon Web Services Lambda function's runtime.
" } }, "SbomReportFormat": { @@ -3189,7 +3198,7 @@ "SecurityGroupIdList": { "base": null, "refs": { - "LambdaVpcConfig$securityGroupIds": "The VPC security groups and subnets that are attached to an AWS Lambda function. For more information, see VPC Settings.
" + "LambdaVpcConfig$securityGroupIds": "The VPC security groups and subnets that are attached to an Amazon Web Services Lambda function. For more information, see VPC Settings.
" } }, "SendCisSessionHealthRequest": { @@ -3315,7 +3324,7 @@ "FailedMemberAccountEc2DeepInspectionStatusState$ec2ScanStatus": "The status of EC2 scanning in the account that failed to activate Amazon Inspector deep inspection.
", "ResourceStatus$ec2": "The status of Amazon Inspector scanning for Amazon EC2 resources.
", "ResourceStatus$ecr": "The status of Amazon Inspector scanning for Amazon ECR resources.
", - "ResourceStatus$lambda": "The status of Amazon Inspector scanning for AWS Lambda function.
", + "ResourceStatus$lambda": "The status of Amazon Inspector scanning for Amazon Web Services Lambda function.
", "ResourceStatus$lambdaCode": "The status of Amazon Inspector scanning for custom application code for Amazon Web Services Lambda functions.
", "State$status": "The status of Amazon Inspector for the account.
" } @@ -3401,7 +3410,7 @@ "FreeTrialInfoError$message": "The error message returned.
", "GetCisScanReportResponse$url": "The URL where the CIS scan report PDF can be downloaded.
", "InternalServerException$message": null, - "LambdaFunctionAggregationResponse$functionName": "The AWS Lambda function names included in the aggregation results.
", + "LambdaFunctionAggregationResponse$functionName": "The Amazon Web Services Lambda function names included in the aggregation results.
", "LambdaFunctionAggregationResponse$runtime": "The runtimes included in the aggregation results.
", "LambdaFunctionMetadata$functionName": "The name of a function.
", "LambdaLayerList$member": null, @@ -3461,15 +3470,15 @@ "FilterCriteria$ecrImageRegistry": "Details on the Amazon ECR registry used to filter findings.
", "FilterCriteria$ecrImageRepositoryName": "Details on the name of the Amazon ECR repository used to filter findings.
", "FilterCriteria$ecrImageTags": "The tags attached to the Amazon ECR container image.
", - "FilterCriteria$exploitAvailable": "Filters the list of AWS Lambda findings by the availability of exploits.
", + "FilterCriteria$exploitAvailable": "Filters the list of Amazon Web Services Lambda findings by the availability of exploits.
", "FilterCriteria$findingArn": "Details on the finding ARNs used to filter findings.
", "FilterCriteria$findingStatus": "Details on the finding status types used to filter findings.
", "FilterCriteria$findingType": "Details on the finding types used to filter findings.
", "FilterCriteria$fixAvailable": "Details on whether a fix is available through a version update. This value can be YES
, NO, or PARTIAL. A PARTIAL fix means that some, but not all, of the packages identified in the finding have fixes available through updated versions.",
-    "FilterCriteria$lambdaFunctionExecutionRoleArn": "Filters the list of AWS Lambda functions by execution role.
", - "FilterCriteria$lambdaFunctionLayers": "Filters the list of AWS Lambda functions by the function's layers. A Lambda function can have up to five layers.
", - "FilterCriteria$lambdaFunctionName": "Filters the list of AWS Lambda functions by the name of the function.
", - "FilterCriteria$lambdaFunctionRuntime": "Filters the list of AWS Lambda functions by the runtime environment for the Lambda function.
", + "FilterCriteria$lambdaFunctionExecutionRoleArn": "Filters the list of Amazon Web Services Lambda functions by execution role.
", + "FilterCriteria$lambdaFunctionLayers": "Filters the list of Amazon Web Services Lambda functions by the function's layers. A Lambda function can have up to five layers.
", + "FilterCriteria$lambdaFunctionName": "Filters the list of Amazon Web Services Lambda functions by the name of the function.
", + "FilterCriteria$lambdaFunctionRuntime": "Filters the list of Amazon Web Services Lambda functions by the runtime environment for the Lambda function.
", "FilterCriteria$networkProtocol": "Details on network protocol used to filter findings.
", "FilterCriteria$relatedVulnerabilities": "Details on the related vulnerabilities used to filter findings.
", "FilterCriteria$resourceId": "Details on the resource IDs used to filter findings.
", @@ -3482,12 +3491,12 @@ "ImageLayerAggregation$layerHashes": "The hashes associated with the layers.
", "ImageLayerAggregation$repositories": "The repository associated with the container image hosting the layers.
", "ImageLayerAggregation$resourceIds": "The ID of the container image layer.
", - "LambdaFunctionAggregation$functionNames": "The AWS Lambda function names to include in the aggregation results.
", + "LambdaFunctionAggregation$functionNames": "The Amazon Web Services Lambda function names to include in the aggregation results.
", "LambdaFunctionAggregation$resourceIds": "The resource IDs to include in the aggregation results.
", - "LambdaFunctionAggregation$runtimes": "Returns findings aggregated by AWS Lambda function runtime environments.
", - "LambdaLayerAggregation$functionNames": "The names of the AWS Lambda functions associated with the layers.
", - "LambdaLayerAggregation$layerArns": "The Amazon Resource Name (ARN) of the AWS Lambda function layer.
", - "LambdaLayerAggregation$resourceIds": "The resource IDs for the AWS Lambda function layers.
", + "LambdaFunctionAggregation$runtimes": "Returns findings aggregated by Amazon Web Services Lambda function runtime environments.
", + "LambdaLayerAggregation$functionNames": "The names of the Amazon Web Services Lambda functions associated with the layers.
", + "LambdaLayerAggregation$layerArns": "The Amazon Resource Name (ARN) of the Amazon Web Services Lambda function layer.
", + "LambdaLayerAggregation$resourceIds": "The resource IDs for the Amazon Web Services Lambda function layers.
", "ListFindingAggregationsRequest$accountIds": "The Amazon Web Services account IDs to retrieve finding aggregation data for.
", "PackageAggregation$packageNames": "The names of packages to aggregate findings on.
", "RepositoryAggregation$repositories": "The names of repositories to aggregate findings on.
", @@ -3581,7 +3590,7 @@ "Ec2Metadata$tags": "The tags attached to the instance.
", "Filter$tags": "The tags attached to the filter.
", "LambdaFunctionAggregationResponse$lambdaTags": "The tags included in the aggregation results.
", - "LambdaFunctionMetadata$functionTags": "The resource tags on an AWS Lambda function.
", + "LambdaFunctionMetadata$functionTags": "The resource tags on an Amazon Web Services Lambda function.
", "ListTagsForResourceResponse$tags": "The tags associated with the resource.
", "Resource$tags": "The tags attached to the resource.
", "TagResourceRequest$tags": "The tags to be added to a resource.
" @@ -3934,7 +3943,7 @@ "Version": { "base": null, "refs": { - "AwsLambdaFunctionDetails$version": "The version of the AWS Lambda function.
" + "AwsLambdaFunctionDetails$version": "The version of the Amazon Web Services Lambda function.
" } }, "VpcId": { diff --git a/models/apis/sagemaker/2017-07-24/api-2.json b/models/apis/sagemaker/2017-07-24/api-2.json index e46c7ea0909..69df0671ba6 100644 --- a/models/apis/sagemaker/2017-07-24/api-2.json +++ b/models/apis/sagemaker/2017-07-24/api-2.json @@ -1012,6 +1012,14 @@ {"shape":"ResourceNotFound"} ] }, + "DeleteHyperParameterTuningJob":{ + "name":"DeleteHyperParameterTuningJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHyperParameterTuningJobRequest"} + }, "DeleteImage":{ "name":"DeleteImage", "http":{ @@ -8090,6 +8098,13 @@ "members":{ } }, + "DeleteHyperParameterTuningJobRequest":{ + "type":"structure", + "required":["HyperParameterTuningJobName"], + "members":{ + "HyperParameterTuningJobName":{"shape":"HyperParameterTuningJobName"} + } + }, "DeleteImageRequest":{ "type":"structure", "required":["ImageName"], diff --git a/models/apis/sagemaker/2017-07-24/docs-2.json b/models/apis/sagemaker/2017-07-24/docs-2.json index b4d2b71859b..e4a409ba34a 100644 --- a/models/apis/sagemaker/2017-07-24/docs-2.json +++ b/models/apis/sagemaker/2017-07-24/docs-2.json @@ -19,7 +19,7 @@ "CreateContext": "Creates a context. A context is a lineage tracking entity that represents a logical grouping of other tracking or experiment entities. Some examples are an endpoint and a model package. For more information, see Amazon SageMaker ML Lineage Tracking.
", "CreateDataQualityJobDefinition": "Creates a definition for a job that monitors data quality and drift. For information about model monitor, see Amazon SageMaker Model Monitor.
", "CreateDeviceFleet": "Creates a device fleet.
", - "CreateDomain": "Creates a Domain
. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.",
+    "CreateDomain": "Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other. EFS storage When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files. SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption. VPC configuration All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available: PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon SageMaker, which allows internet access. This is the default value. VpcOnly - All traffic is through the specified VPC and subnets. Internet access is disabled by default. To allow internet access, you must specify a NAT gateway. When internet access is disabled, you won't be able to run a Amazon SageMaker Studio notebook or to train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime or a NAT gateway and your security groups allow outbound connections. NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully. For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.
", "CreateEdgeDeploymentPlan": "Creates an edge deployment plan, consisting of multiple stages. Each stage may have a different deployment configuration and devices.
", "CreateEdgeDeploymentStage": "Creates a new stage in an existing edge deployment plan.
", "CreateEdgePackagingJob": "Starts a SageMaker Edge Manager model packaging job. Edge Manager will use the model artifacts from the Amazon Simple Storage Service bucket that you specify. After the model has been packaged, Amazon SageMaker saves the resulting artifacts to an S3 bucket that you specify.
", @@ -85,6 +85,7 @@ "DeleteHub": "Delete a hub.
Hub APIs are only callable through SageMaker Studio.",
    "DeleteHubContent": "Delete the contents of a hub. Hub APIs are only callable through SageMaker Studio.",
    "DeleteHumanTaskUi": "Use this operation to delete a human task user interface (worker task template). To see a list of human task user interfaces (work task templates) in your account, use ListHumanTaskUis. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.",
+    "DeleteHyperParameterTuningJob": "Deletes a hyperparameter tuning job. The DeleteHyperParameterTuningJob API deletes only the tuning job entry that was created in SageMaker when you called the CreateHyperParameterTuningJob API. It does not delete training jobs, artifacts, or the IAM role that you specified when creating the model.",
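A minimal aws-sdk-go sketch of calling the DeleteHyperParameterTuningJob operation added in this release; the tuning-job name is a placeholder and shared-config credentials are assumed:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Region and credentials come from the environment/shared config.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := sagemaker.New(sess)

	// Deletes only the tuning job entry; training jobs and artifacts remain.
	_, err := svc.DeleteHyperParameterTuningJob(&sagemaker.DeleteHyperParameterTuningJobInput{
		HyperParameterTuningJobName: aws.String("example-tuning-job"), // placeholder
	})
	if err != nil {
		log.Fatalf("DeleteHyperParameterTuningJob: %v", err)
	}
	fmt.Println("tuning job entry deleted")
}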
    "DeleteImage": "Deletes a SageMaker image and all versions of the image. The container images aren't deleted.
", "DeleteImageVersion": "Deletes a version of a SageMaker image. The container image the version represents isn't deleted.
", "DeleteInferenceComponent": "Deletes an inference component.
", @@ -293,7 +294,7 @@ "UpdateDeviceFleet": "Updates a fleet of devices.
", "UpdateDevices": "Updates one or more devices in a fleet.
", "UpdateDomain": "Updates the default settings for new user profiles in the domain.
", - "UpdateEndpoint": "Deploys the new EndpointConfig
specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss). When SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API. You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig. If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.",
+    "UpdateEndpoint": "Deploys the EndpointConfig specified in the request to a new fleet of instances. SageMaker shifts endpoint traffic to the new instances with the updated endpoint configuration and then deletes the old instances using the previous EndpointConfig (there is no availability loss). For more information about how to control the update and traffic shifting process, see Update models in production. When SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API. You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig. If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.",
    "UpdateEndpointWeightsAndCapacities": "Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant associated with an existing endpoint. When it receives the request, SageMaker sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.",
    "UpdateExperiment": "Adds, updates, or removes the description of an experiment. Updates the display name of an experiment.
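As a hedged illustration of the endpoint-update behavior described above, a minimal aws-sdk-go sketch that points an existing endpoint at a new endpoint configuration; both names are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sagemaker.New(sess)

	// The new EndpointConfig must already exist; in-use configs must not be deleted.
	_, err := svc.UpdateEndpoint(&sagemaker.UpdateEndpointInput{
		EndpointName:       aws.String("example-endpoint"),
		EndpointConfigName: aws.String("example-endpoint-config-v2"),
	})
	if err != nil {
		log.Fatalf("UpdateEndpoint: %v", err)
	}
}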
", "UpdateFeatureGroup": "Updates the feature group by either adding features or updating the online store configuration. Use one of the following request parameters at a time while using the UpdateFeatureGroup
API. You can add features for your feature group using the FeatureAdditions request parameter. Features cannot be removed from a feature group. You can update the online store configuration by using the OnlineStoreConfig request parameter. If a TtlDuration is specified, the default TtlDuration applies for all records added to the feature group after the feature group is updated. If a record level TtlDuration exists from using the PutRecord API, the record level TtlDuration applies to that record instead of the default TtlDuration.",
      "CreateModelPackageInput$ModelPackageGroupName": "The name or Amazon Resource Name (ARN) of the model package group that this model version belongs to. This parameter is required for versioned models, and does not apply to unversioned models.
", "DeleteModelPackageGroupInput$ModelPackageGroupName": "The name of the model group to delete.
", "DescribeAlgorithmInput$AlgorithmName": "The name of the algorithm to describe.
", - "DescribeModelPackageGroupInput$ModelPackageGroupName": "The name of gthe model group to describe.
", + "DescribeModelPackageGroupInput$ModelPackageGroupName": "The name of the model group to describe.
", "HyperParameterAlgorithmSpecification$AlgorithmName": "The name of the resource algorithm to use for the hyperparameter tuning job. If you specify a value for this parameter, do not specify a value for TrainingImage
.
A filter that returns only model versions that belong to the specified model group.
", "SourceAlgorithm$AlgorithmName": "The name of an algorithm that was used to create the model package. The algorithm must be either an algorithm resource in your SageMaker account or an algorithm in Amazon Web Services Marketplace that you are subscribed to.
" @@ -3595,6 +3596,11 @@ "refs": { } }, + "DeleteHyperParameterTuningJobRequest": { + "base": null, + "refs": { + } + }, "DeleteImageRequest": { "base": null, "refs": { @@ -6789,6 +6795,7 @@ "base": null, "refs": { "CreateHyperParameterTuningJobRequest$HyperParameterTuningJobName": "The name of the tuning job. This name is the prefix for the names of all training jobs that this tuning job launches. The name must be unique within the same Amazon Web Services account and Amazon Web Services Region. The name must have 1 to 32 characters. Valid characters are a-z, A-Z, 0-9, and : + = @ _ % - (hyphen). The name is not case sensitive.
", + "DeleteHyperParameterTuningJobRequest$HyperParameterTuningJobName": "The name of the hyperparameter tuning job that you want to delete.
", "DescribeHyperParameterTuningJobRequest$HyperParameterTuningJobName": "The name of the tuning job.
", "DescribeHyperParameterTuningJobResponse$HyperParameterTuningJobName": "The name of the hyperparameter tuning job.
", "HyperParameterTrainingJobSummary$TuningJobName": "The HyperParameter tuning job that launched the training job.
", @@ -6835,7 +6842,7 @@ "HyperParameterTuningJobStatus": { "base": null, "refs": { - "DescribeHyperParameterTuningJobResponse$HyperParameterTuningJobStatus": "The status of the tuning job: InProgress, Completed, Failed, Stopping, or Stopped.
", + "DescribeHyperParameterTuningJobResponse$HyperParameterTuningJobStatus": "The status of the tuning job.
", "HyperParameterTuningJobSearchEntity$HyperParameterTuningJobStatus": "The status of a hyperparameter tuning job.
", "HyperParameterTuningJobSummary$HyperParameterTuningJobStatus": "The status of the tuning job.
", "ListHyperParameterTuningJobsRequest$StatusEquals": "A filter that returns only tuning jobs with the specified status.
" @@ -9342,7 +9349,7 @@ } }, "ModelAccessConfig": { - "base": "The access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig
. For more information, see End-user license agreements.",
+    "base": "The access configuration file to control access to the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. If you are a Jumpstart user, see the End-user license agreements section for more details on accepting the EULA. If you are an AutoML user, see the Optional Parameters section of Create an AutoML job to fine-tune text generation models using the API for details on How to set the EULA acceptance when fine-tuning a model using the AutoML API.",
Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.
The ID of the Amazon Elastic File System (EFS) managed by this Domain.
" + "DescribeDomainResponse$HomeEfsFileSystemId": "The ID of the Amazon Elastic File System managed by this Domain.
" } }, "ResourceInUse": { @@ -13078,7 +13085,7 @@ "RecommendationJobCompiledOutputConfig$S3OutputUri": "Identifies the Amazon S3 bucket where you want SageMaker to store the compiled model artifacts.
", "RecommendationJobPayloadConfig$SamplePayloadUrl": "The Amazon Simple Storage Service (Amazon S3) path where the sample payload is stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix).
", "RedshiftDatasetDefinition$OutputS3Uri": "The location in Amazon S3 where the Redshift query results are stored.
", - "S3DataSource$S3Uri": "Depending on the value specified for the S3DataType
, identifies either a key name prefix or a manifest. For example: A key name prefix might look like this: s3://bucketname/exampleprefix A manifest might look like this: s3://bucketname/example.manifest A manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of S3Uri. Note that the prefix must be a valid non-empty S3Uri that precludes users from specifying a manifest whose individual S3Uri is sourced from different S3 buckets. The following code example shows a valid manifest format: [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"}, \"relative/path/to/custdata-1\", \"relative/path/custdata-2\", ... \"relative/path/custdata-N\" ] This JSON is equivalent to the following S3Uri list: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N The complete set of S3Uri in this manifest is the input data for the channel for this data source. The object that each S3Uri points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf. Your input bucket must be located in same Amazon Web Services region as your training job.",
+    "S3DataSource$S3Uri": "Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example: A key name prefix might look like this: s3://bucketname/exampleprefix/ A manifest might look like this: s3://bucketname/example.manifest A manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of S3Uri. Note that the prefix must be a valid non-empty S3Uri that precludes users from specifying a manifest whose individual S3Uri is sourced from different S3 buckets. The following code example shows a valid manifest format: [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"}, \"relative/path/to/custdata-1\", \"relative/path/custdata-2\", ... \"relative/path/custdata-N\" ] This JSON is equivalent to the following S3Uri list: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N The complete set of S3Uri in this manifest is the input data for the channel for this data source. The object that each S3Uri points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf. Your input bucket must be located in same Amazon Web Services region as your training job.
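To make the two addressing modes concrete, a hedged aws-sdk-go sketch constructing an S3DataSource either from a key name prefix or from the manifest described above; the bucket names are the documentation's placeholders:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

// prefixInput reads every object under a key name prefix.
func prefixInput() *sagemaker.S3DataSource {
	return &sagemaker.S3DataSource{
		S3DataType: aws.String(sagemaker.S3DataTypeS3Prefix),
		S3Uri:      aws.String("s3://bucketname/exampleprefix/"),
	}
}

// manifestInput reads only the objects listed in a manifest file.
func manifestInput() *sagemaker.S3DataSource {
	return &sagemaker.S3DataSource{
		S3DataType: aws.String(sagemaker.S3DataTypeManifestFile),
		S3Uri:      aws.String("s3://bucketname/example.manifest"),
	}
}

func main() {
	_ = prefixInput()
	_ = manifestInput()
}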
", "S3StorageConfig$S3Uri": "The S3 URI, or location in Amazon S3, of OfflineStore
. S3 URIs have a format similar to the following: s3://example-bucket/prefix/.",
      "S3StorageConfig$ResolvedOutputS3Uri": "The S3 path where offline records are written.
", "SharingSettings$S3OutputPath": "When NotebookOutputOption
is Allowed, the Amazon S3 bucket used to store the shared notebook snapshots.",
      "TensorBoardOutputConfig$S3OutputPath": "Path to Amazon S3 storage location for TensorBoard output.
", "TimeSeriesForecastingJobConfig$FeatureSpecificationS3Uri": "A URL to the Amazon S3 data source containing additional selected features that complement the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig
. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig. You can input FeatureAttributeNames (optional) in JSON format as shown below: { \"FeatureAttributeNames\":[\"col1\", \"col2\", ...] }. You can also specify the data type of the feature (optional) in the format shown below: { \"FeatureDataTypes\":{\"col1\":\"numeric\", \"col2\":\"categorical\" ... } } Autopilot supports the following data types: numeric, categorical, text, and datetime. These column keys must not include any column set in TimeSeriesConfig.",
      "TransformOutput$S3OutputPath": "The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For example, s3://bucket-name/key-name-prefix. For every S3 object used as input for the transform job, batch transform stores the transformed data with an .out suffix in a corresponding subfolder in the location in the output prefix. For example, for the input data stored at s3://bucket-name/input-name-prefix/dataset01/data.csv, batch transform stores the transformed data at s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out. Batch transform doesn't upload partially processed objects. For an input S3 object that contains multiple records, it creates an .out file only if the transform job succeeds on the entire file. When the input contains multiple S3 objects, the batch transform job processes the listed S3 objects and uploads only the output for successfully processed objects. If any object fails in the transform job batch transform marks the job as failed to prompt investigation.",
-      "TransformS3DataSource$S3Uri": "Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example: A key name prefix might look like this: s3://bucketname/exampleprefix. A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"}, \"relative/path/to/custdata-1\", \"relative/path/custdata-2\", ... \"relative/path/custdata-N\" ] The preceding JSON matches the following S3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf."
+      "TransformS3DataSource$S3Uri": "Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example: A key name prefix might look like this: s3://bucketname/exampleprefix/. A manifest might look like this: s3://bucketname/example.manifest The manifest is an S3 object which is a JSON file with the following format: [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"}, \"relative/path/to/custdata-1\", \"relative/path/custdata-2\", ... \"relative/path/custdata-N\" ] The preceding JSON matches the following S3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N The complete set of S3Uris in this manifest constitutes the input data for the channel for this datasource. The object that each S3Uris points to must be readable by the IAM role that Amazon SageMaker uses to perform tasks on your behalf."
      "UiConfig$UiTemplateS3Uri": "The Amazon S3 bucket location of the UI template, or worker task template. This is the template used to render the worker UI and tools for labeling job tasks. For more information about the contents of a UI template, see Creating Your Custom Labeling Task Template.
", "WorkspaceSettings$S3ArtifactPath": "The Amazon S3 bucket used to store artifacts generated by Canvas. Updating the Amazon S3 location impacts existing configuration settings, and Canvas users no longer have access to their artifacts. Canvas users must log out and log back in to apply the new location.
" } @@ -14573,7 +14580,7 @@ } }, "ThroughputConfigDescription": { - "base": "Active throughput configuration of the feature group. Used to set feature group throughput configuration. There are two modes: ON_DEMAND
and PROVISIONED. With on-demand mode, you are charged for data reads and writes that your application performs on your feature group. You do not need to specify read and write throughput because Feature Store accommodates your workloads as they ramp up and down. You can switch a feature group to on-demand only once in a 24 hour period. With provisioned throughput mode, you specify the read and write capacity per second that you expect your application to require, and you are billed based on those limits. Exceeding provisioned throughput will result in your requests being throttled. Note: PROVISIONED throughput mode is supported only for feature groups that are offline-only, or use the Standard tier online store.",
+    "base": "Active throughput configuration of the feature group. There are two modes: ON_DEMAND and PROVISIONED. With on-demand mode, you are charged for data reads and writes that your application performs on your feature group. You do not need to specify read and write throughput because Feature Store accommodates your workloads as they ramp up and down. You can switch a feature group to on-demand only once in a 24 hour period. With provisioned throughput mode, you specify the read and write capacity per second that you expect your application to require, and you are billed based on those limits. Exceeding provisioned throughput will result in your requests being throttled. Note: PROVISIONED throughput mode is supported only for feature groups that are offline-only, or use the Standard tier online store.