diff --git a/CHANGELOG.md b/CHANGELOG.md
index 585d1ea2253..7fee85718f8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,11 @@
+Release v1.40.51 (2021-09-28)
+===
+
+### Service Client Updates
+* `service/imagebuilder`: Updates service documentation
+* `service/transfer`: Updates service documentation
+  * Added changes for managed workflows feature APIs.
+
 Release v1.40.50 (2021-09-27)
 ===
 
diff --git a/aws/version.go b/aws/version.go
index be47f9b2401..f489032151d 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.40.50"
+const SDKVersion = "1.40.51"
diff --git a/models/apis/imagebuilder/2019-12-02/docs-2.json b/models/apis/imagebuilder/2019-12-02/docs-2.json
index f58306b7f98..0d19a1367fc 100644
--- a/models/apis/imagebuilder/2019-12-02/docs-2.json
+++ b/models/apis/imagebuilder/2019-12-02/docs-2.json
@@ -29,8 +29,8 @@
     "GetImageRecipePolicy": "

Gets an image recipe policy.

", "GetInfrastructureConfiguration": "

Gets an infrastructure configuration.

", "ImportComponent": "

Imports a component and transforms its data into a component document.

", - "ListComponentBuildVersions": "

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", - "ListComponents": "

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", + "ListComponentBuildVersions": "

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", + "ListComponents": "

Returns the list of component versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
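To make the wildcard and build-version behavior described above concrete, here is a rough aws-sdk-go v1 sketch (not part of this diff) that lists component versions by name and then enumerates the build versions behind each semantic version. The Region, the component name, and the "name" filter key are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/imagebuilder"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := imagebuilder.New(sess)

	// List component versions owned by this account, filtered by name.
	comps, err := svc.ListComponents(&imagebuilder.ListComponentsInput{
		Owner: aws.String("Self"),
		Filters: []*imagebuilder.Filter{
			{Name: aws.String("name"), Values: []*string{aws.String("my-component")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// For each semantic version, list the build versions behind it
	// (the fourth version node that Image Builder assigns automatically).
	for _, cv := range comps.ComponentVersionList {
		builds, err := svc.ListComponentBuildVersions(&imagebuilder.ListComponentBuildVersionsInput{
			ComponentVersionArn: cv.Arn,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range builds.ComponentSummaryList {
			fmt.Println(aws.StringValue(b.Arn), aws.StringValue(b.Version))
		}
	}
}
```

The later Image Builder sketches below reuse this client setup (`svc`, `aws`, `log`) rather than repeating the imports.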

", "ListContainerRecipes": "

Returns a list of container recipes.

", "ListDistributionConfigurations": "

Returns a list of distribution configurations.

", "ListImageBuildVersions": "

Returns a list of image build versions.

", @@ -95,7 +95,7 @@ "AmiNameString": { "base": null, "refs": { - "AmiDistributionConfiguration$name": "

The name of the distribution configuration.

" + "AmiDistributionConfiguration$name": "

The name of the output AMI.

" } }, "Arn": { @@ -652,7 +652,7 @@ "EmptyString": { "base": null, "refs": { - "InstanceBlockDeviceMapping$noDevice": "

Use to remove a mapping from the parent image.

" + "InstanceBlockDeviceMapping$noDevice": "

Use to remove a mapping from the base image.
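A minimal sketch of the noDevice usage described here, reusing the client setup from the earlier sketch; the device name is a placeholder, and passing an empty string (the EmptyString shape in this model) is the assumed way to drop the mapping.

```go
// Drop a block device mapping that the base image defines.
mappings := []*imagebuilder.InstanceBlockDeviceMapping{
	{
		DeviceName: aws.String("/dev/sdb"), // placeholder device
		NoDevice:   aws.String(""),         // empty string removes the mapping
	},
}
// Pass mappings as BlockDeviceMappings on CreateImageRecipeInput.
_ = mappings
```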

" } }, "ErrorMessage": { @@ -1036,7 +1036,7 @@ "ImageVersionList": { "base": null, "refs": { - "ListImagesResponse$imageVersionList": "

The list of image semantic versions.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "ListImagesResponse$imageVersionList": "

The list of image semantic versions.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" } }, "ImportComponentRequest": { @@ -1109,7 +1109,7 @@ } }, "InstanceConfiguration": { - "base": "

Defines a custom source AMI and block device mapping configurations of an instance used for building and testing container images.

", + "base": "

Defines a custom base AMI and block device mapping configurations of an instance used for building and testing container images.

", "refs": { "ContainerRecipe$instanceConfiguration": "

A group of options that can be used to configure an instance for building and testing container images.

", "CreateContainerRecipeRequest$instanceConfiguration": "

A group of options that can be used to configure an instance for building and testing container images.

" @@ -1141,7 +1141,7 @@ "InstanceTypeList": { "base": null, "refs": { - "CreateInfrastructureConfigurationRequest$instanceTypes": "

The instance metadata options that you can set for the HTTP requests that pipeline builds use to launch EC2 build and test instances. For more information about instance metadata options, see one of the following links:

", + "CreateInfrastructureConfigurationRequest$instanceTypes": "

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.
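As a hedged sketch (reusing the earlier client), offering several instance types lets the service pick whichever is available; the configuration name, instance profile, and token are placeholders.

```go
_, err := svc.CreateInfrastructureConfiguration(&imagebuilder.CreateInfrastructureConfigurationInput{
	Name:                aws.String("my-infra-config"),
	InstanceProfileName: aws.String("EC2InstanceProfileForImageBuilder"),
	// Offer several types; Image Builder picks one based on availability.
	InstanceTypes: []*string{aws.String("m5.large"), aws.String("m5.xlarge")},
	ClientToken:   aws.String("example-infra-token-001"),
})
if err != nil {
	log.Fatal(err)
}
```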

", "InfrastructureConfiguration$instanceTypes": "

The instance types of the infrastructure configuration.

", "InfrastructureConfigurationSummary$instanceTypes": "

The instance types of the infrastructure configuration.

", "UpdateInfrastructureConfigurationRequest$instanceTypes": "

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" @@ -1367,17 +1367,17 @@ "ContainerRecipe$description": "

The description of the container recipe.

", "ContainerRecipe$owner": "

The owner of the container recipe.

", "ContainerRecipe$kmsKeyId": "

Identifies which KMS key is used to encrypt the container image for distribution to the target Region.

", - "ContainerRecipe$parentImage": "

The source image for the container recipe.

", + "ContainerRecipe$parentImage": "

The base image for the container recipe.

", "ContainerRecipe$workingDirectory": "

The working directory for use during build and test workflows.

", "ContainerRecipeSummary$owner": "

The owner of the container recipe.

", - "ContainerRecipeSummary$parentImage": "

The source image for the container recipe.

", + "ContainerRecipeSummary$parentImage": "

The base image for the container recipe.

", "CreateComponentRequest$description": "

The description of the component. Describes the contents of the component.

", "CreateComponentRequest$changeDescription": "

The change description of the component. Describes what change has been made in this version, or what makes this version different from other versions of this component.

", "CreateComponentRequest$kmsKeyId": "

The ID of the KMS key that should be used to encrypt this component.

", "CreateComponentResponse$requestId": "

The request ID that uniquely identifies this request.

", "CreateContainerRecipeRequest$description": "

The description of the container recipe.

", - "CreateContainerRecipeRequest$imageOsVersionOverride": "

Specifies the operating system version for the source image.

", - "CreateContainerRecipeRequest$parentImage": "

The source image for the container recipe.

", + "CreateContainerRecipeRequest$imageOsVersionOverride": "

Specifies the operating system version for the base image.

", + "CreateContainerRecipeRequest$parentImage": "

The base image for the container recipe.

", "CreateContainerRecipeRequest$workingDirectory": "

The working directory for use during build and test workflows.

", "CreateContainerRecipeRequest$kmsKeyId": "

Identifies which KMS key is used to encrypt the container image.

", "CreateContainerRecipeResponse$requestId": "

The request ID that uniquely identifies this request.

", @@ -1386,7 +1386,7 @@ "CreateImagePipelineRequest$description": "

The description of the image pipeline.

", "CreateImagePipelineResponse$requestId": "

The request ID that uniquely identifies this request.

", "CreateImageRecipeRequest$description": "

The description of the image recipe.

", - "CreateImageRecipeRequest$parentImage": "

The parent image of the image recipe. The value of the string can be the ARN of the parent image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x. You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.

", + "CreateImageRecipeRequest$parentImage": "

The base image of the image recipe. The value of the string can be the ARN of the base image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x. You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.
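For illustration, a sketch (reusing the earlier client) that pins the base image by ARN with x.x.x wildcards so the latest matching version is resolved at build time; the names, ARNs, and token are placeholders.

```go
_, err := svc.CreateImageRecipe(&imagebuilder.CreateImageRecipeInput{
	Name:            aws.String("my-recipe"),
	SemanticVersion: aws.String("1.0.0"),
	// Wildcard version: resolves to the latest matching base image.
	ParentImage: aws.String("arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x"),
	Components: []*imagebuilder.ComponentConfiguration{
		// Components can use the same wildcard filtering.
		{ComponentArn: aws.String("arn:aws:imagebuilder:us-west-2:aws:component/my-component/x.x.x")},
	},
	ClientToken: aws.String("example-recipe-token-001"),
})
if err != nil {
	log.Fatal(err)
}
```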

", "CreateImageRecipeRequest$workingDirectory": "

The working directory used during build and test workflows.

", "CreateImageRecipeResponse$requestId": "

The request ID that uniquely identifies this request.

", "CreateImageResponse$requestId": "

The request ID that uniquely identifies this request.

", @@ -1422,10 +1422,10 @@ "ImagePipeline$description": "

The description of the image pipeline.

", "ImageRecipe$description": "

The description of the image recipe.

", "ImageRecipe$owner": "

The owner of the image recipe.

", - "ImageRecipe$parentImage": "

The parent image of the image recipe.

", + "ImageRecipe$parentImage": "

The base image of the image recipe.

", "ImageRecipe$workingDirectory": "

The working directory to be used during build and test workflows.

", "ImageRecipeSummary$owner": "

The owner of the image recipe.

", - "ImageRecipeSummary$parentImage": "

The parent image of the image recipe.

", + "ImageRecipeSummary$parentImage": "

The base image of the image recipe.

", "ImageState$reason": "

The reason for the image's status.

", "ImageSummary$owner": "

The owner of the image.

", "ImageVersion$owner": "

The owner of the image version.

", @@ -1509,10 +1509,10 @@ "OsVersionList": { "base": null, "refs": { - "Component$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

", - "ComponentSummary$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

", - "ComponentVersion$supportedOsVersions": "

he operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

", - "CreateComponentRequest$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + "Component$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

", + "ComponentSummary$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

", + "ComponentVersion$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

", + "CreateComponentRequest$supportedOsVersions": "

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

" } }, "OutputResources": { @@ -1559,7 +1559,7 @@ "PipelineExecutionStartCondition": { "base": null, "refs": { - "Schedule$pipelineExecutionStartCondition": "

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the source image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.

" + "Schedule$pipelineExecutionStartCondition": "

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the base image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.
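A sketch of the two start conditions in practice (reusing the earlier client); the enum values are passed as plain strings, and all ARNs, names, and the token are placeholders.

```go
_, err := svc.CreateImagePipeline(&imagebuilder.CreateImagePipelineInput{
	Name:                           aws.String("my-pipeline"),
	ImageRecipeArn:                 aws.String("arn:aws:imagebuilder:us-west-2:123456789012:image-recipe/my-recipe/1.0.0"),
	InfrastructureConfigurationArn: aws.String("arn:aws:imagebuilder:us-west-2:123456789012:infrastructure-configuration/my-infra-config"),
	Schedule: &imagebuilder.Schedule{
		ScheduleExpression: aws.String("cron(0 0 * * ? *)"), // daily at midnight UTC
		// Rebuild only when the base image or components have newer matching versions;
		// use EXPRESSION_MATCH_ONLY to rebuild on every cron match instead.
		PipelineExecutionStartCondition: aws.String("EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE"),
	},
	ClientToken: aws.String("example-pipeline-token-001"),
})
if err != nil {
	log.Fatal(err)
}
```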

" } }, "PipelineStatus": { @@ -1579,7 +1579,7 @@ "ContainerRecipe$platform": "

The system platform for the container, such as Windows or Linux.

", "ContainerRecipeSummary$platform": "

The system platform for the container, such as Windows or Linux.

", "CreateComponentRequest$platform": "

The platform of the component.

", - "CreateContainerRecipeRequest$platformOverride": "

Specifies the operating system platform when you use a custom source image.

", + "CreateContainerRecipeRequest$platformOverride": "

Specifies the operating system platform when you use a custom base image.

", "Image$platform": "

The platform of the image.

", "ImagePipeline$platform": "

The platform of the image pipeline.

", "ImageRecipe$platform": "

The platform of the image recipe.

", @@ -1914,7 +1914,7 @@ "UserDataOverride": { "base": null, "refs": { - "AdditionalInstanceConfiguration$userDataOverride": "

Use this property to provide commands or a command script to run when you launch your build instance.

The userDataOverride property replaces any commands that Image Builder might have added to ensure that Systems Manager is installed on your Linux build instance. If you override the user data, make sure that you add commands to install Systems Manager, if it is not pre-installed on your source image.

" + "AdditionalInstanceConfiguration$userDataOverride": "

Use this property to provide commands or a command script to run when you launch your build instance.

The userDataOverride property replaces any commands that Image Builder might have added to ensure that Systems Manager is installed on your Linux build instance. If you override the user data, make sure that you add commands to install Systems Manager, if it is not pre-installed on your base image.
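A cautious sketch of overriding user data while keeping Systems Manager installed. It assumes the field expects base64-encoded user data (as EC2 user data is) and that `encoding/base64` is imported alongside the earlier setup; the diff itself does not spell either out, and the install commands are placeholders.

```go
script := `#!/bin/bash
yum install -y amazon-ssm-agent
systemctl enable --now amazon-ssm-agent`

addl := &imagebuilder.AdditionalInstanceConfiguration{
	// Replaces Image Builder's own user data, so the script must install
	// the Systems Manager agent itself if the base image lacks it.
	UserDataOverride: aws.String(base64.StdEncoding.EncodeToString([]byte(script))),
}
// Attach addl as AdditionalInstanceConfiguration on CreateImageRecipeInput.
_ = addl
```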

" } }, "VersionNumber": { @@ -1922,16 +1922,16 @@ "refs": { "Component$version": "

The version of the component.

", "ComponentSummary$version": "

The version of the component.

", - "ComponentVersion$version": "

The semantic version of the component.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", - "ContainerRecipe$version": "

The semantic version of the container recipe.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", + "ComponentVersion$version": "

The semantic version of the component.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", + "ContainerRecipe$version": "

The semantic version of the container recipe.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", "CreateComponentRequest$semanticVersion": "

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.
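To ground the versioning rules, a sketch (reusing the earlier client) that registers version 1.0.0 of a component from an inline document; Image Builder appends the build number as the fourth node. The name, document contents, and token are placeholders.

```go
doc := `name: HelloWorld
description: Example build component
schemaVersion: 1.0
phases:
  - name: build
    steps:
      - name: SayHello
        action: ExecuteBash
        inputs:
          commands:
            - echo "hello from 1.0.0"`

_, err := svc.CreateComponent(&imagebuilder.CreateComponentInput{
	Name:            aws.String("hello-world"),
	SemanticVersion: aws.String("1.0.0"), // becomes 1.0.0/<build> after creation
	Platform:        aws.String("Linux"),
	Data:            aws.String(doc),
	ClientToken:     aws.String("example-component-token-001"),
})
if err != nil {
	log.Fatal(err)
}
```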

", "CreateContainerRecipeRequest$semanticVersion": "

The semantic version of the container recipe. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

", "CreateImageRecipeRequest$semanticVersion": "

The semantic version of the image recipe. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

", - "Image$version": "

The semantic version of the image.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", + "Image$version": "

The semantic version of the image.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", "ImageRecipe$version": "

The version of the image recipe.

", "ImageSummary$version": "

The version of the image.

", - "ImageVersion$version": "

Details for a specific version of an Image Builder image. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", - "ImportComponentRequest$semanticVersion": "

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "ImageVersion$version": "

Details for a specific version of an Image Builder image. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

", + "ImportComponentRequest$semanticVersion": "

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.
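A sketch of importing a script as a component at a chosen semantic version (date-style, per the patterns above), reusing the earlier client; the bucket URI, name, and token are placeholders.

```go
_, err := svc.ImportComponent(&imagebuilder.ImportComponentInput{
	Name:            aws.String("imported-setup-script"),
	SemanticVersion: aws.String("2021.9.28"), // date-style version pattern
	Type:            aws.String("BUILD"),
	Format:          aws.String("SHELL"),
	Platform:        aws.String("Linux"),
	Uri:             aws.String("s3://my-bucket/components/setup.sh"),
	ClientToken:     aws.String("example-import-token-001"),
})
if err != nil {
	log.Fatal(err)
}
```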

" } } } diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 3a42e58aa69..012f79fed4a 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -90,7 +90,7 @@ "CopyStepDetails": { "base": "

Each step type has its own StepDetails structure.

", "refs": { - "WorkflowStep$CopyStepDetails": "

Details for a step that performs a file copy.

Consists of the following values:

" + "WorkflowStep$CopyStepDetails": "

Details for a step that performs a file copy.

Consists of the following values:

" } }, "CreateAccessRequest": { @@ -179,9 +179,9 @@ } }, "DeleteStepDetails": { - "base": "

The name of the step, used to identify the step that is being deleted.

", + "base": "

The name of the step, used to identify the delete step.

", "refs": { - "WorkflowStep$DeleteStepDetails": "

You need to specify the name of the file to be deleted.

" + "WorkflowStep$DeleteStepDetails": "

Details for a step that deletes the file.

" } }, "DeleteUserRequest": { @@ -305,10 +305,10 @@ } }, "EfsFileLocation": { - "base": "

Specifies the details for the file location for the file being used in the workflow. Only applicable if you are using Amazon EFS for storage.

You need to provide the file system ID and the pathname. The pathname can represent either a path or a file. This is determined by whether or not you end the path value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.

For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/, folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the bob file.

", + "base": "

Reserved for future use.

", "refs": { "FileLocation$EfsFileLocation": "

Specifies the Amazon EFS ID and the path for the file being used.

", - "InputFileLocation$EfsFileLocation": "

Specifies the details for the Amazon EFS file being copied.

" + "InputFileLocation$EfsFileLocation": "

Reserved for future use.

" } }, "EfsFileSystemId": { @@ -390,7 +390,7 @@ "base": null, "refs": { "ExecutionResults$Steps": "

Specifies the details for the steps that are in the specified workflow.

", - "ExecutionResults$OnExceptionSteps": "

Specifies the steps (actions) to take if any errors are encountered during execution of the workflow.

" + "ExecutionResults$OnExceptionSteps": "

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

" } }, "ExternalId": { @@ -441,12 +441,12 @@ "HomeDirectoryMappings": { "base": null, "refs": { - "CreateAccessRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

", - "CreateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

", + "CreateAccessRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

", + "CreateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.
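The chroot pattern spelled out above can be sketched with the v1 Transfer Family client roughly as follows; the server ID, role ARN, user name, and paths are placeholders rather than values from this diff.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

	// Map the virtual root ("/") to a single bucket prefix so the user is
	// locked into that directory ("chroot").
	_, err := svc.CreateUser(&transfer.CreateUserInput{
		ServerId:          aws.String("s-1234567890abcdef0"),
		UserName:          aws.String("bob"),
		Role:              aws.String("arn:aws:iam::123456789012:role/transfer-access-role"),
		HomeDirectoryType: aws.String("LOGICAL"),
		HomeDirectoryMappings: []*transfer.HomeDirectoryMapEntry{
			{Entry: aws.String("/"), Target: aws.String("/bucket_name/home/mydirectory")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The Role must still grant access to the Target path; the later transfer sketches reuse this client setup.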

", "DescribedAccess$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

In most cases, you can use this value instead of the session policy to lock down the associated access to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

", "DescribedUser$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

", - "UpdateAccessRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

", - "UpdateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "UpdateAccessRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

", + "UpdateUserRequest$HomeDirectoryMappings": "

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" } }, "HomeDirectoryType": { @@ -856,7 +856,7 @@ "base": null, "refs": { "S3FileLocation$Bucket": "

Specifies the S3 bucket that contains the file being used.

", - "S3InputFileLocation$Bucket": "

Specifies the S3 bucket that contains the file being copied.

" + "S3InputFileLocation$Bucket": "

Specifies the S3 bucket for the customer input file.

" } }, "S3Etag": { @@ -866,13 +866,13 @@ } }, "S3FileLocation": { - "base": "

Specifies the details for the file location for the file being used in the workflow. Only applicable if you are using S3 storage.

You need to provide the bucket and key. The key can represent either a path or a file. This is determined by whether or not you end the key value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.

For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/, folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the bob file.

", + "base": "

Specifies the details for the file location for the file being used in the workflow. Only applicable if you are using S3 storage.

", "refs": { "FileLocation$S3FileLocation": "

Specifies the S3 details for the file being used, such as bucket, Etag, and so forth.

" } }, "S3InputFileLocation": { - "base": "

Specifies the details for the S3 file being copied.

", + "base": "

Specifies the customer input S3 file location. If it is used inside copyStepDetails.DestinationFileLocation, it should be the S3 copy destination.

You need to provide the bucket and key. The key can represent either a path or a file. This is determined by whether or not you end the key value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.

For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/ folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the today file.
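A small sketch of the trailing-slash rule for copy destinations, reusing the transfer client setup above; the bucket and keys are placeholders.

```go
// Key ends in "/": the uploaded file keeps its name inside that folder.
intoFolder := &transfer.InputFileLocation{
	S3FileLocation: &transfer.S3InputFileLocation{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("shared-files/bob/"),
	},
}

// Key has no trailing slash: each uploaded file is renamed to "today",
// overwriting the previous copy.
renamed := &transfer.InputFileLocation{
	S3FileLocation: &transfer.S3InputFileLocation{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("shared-files/today"),
	},
}
_, _ = intoFolder, renamed
```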

", "refs": { "InputFileLocation$S3FileLocation": "

Specifies the details for the S3 file being copied.

" } @@ -1335,10 +1335,10 @@ "WorkflowSteps": { "base": null, "refs": { - "CreateWorkflowRequest$Steps": "

Specifies the details for the steps that are in the specified workflow.

The TYPE specifies which of the following actions is being taken for this step.

For file location, you specify either the S3 bucket and key, or the EFS filesystem ID and path.

", - "CreateWorkflowRequest$OnExceptionSteps": "

Specifies the steps (actions) to take if any errors are encountered during execution of the workflow.

", + "CreateWorkflowRequest$Steps": "

Specifies the details for the steps that are in the specified workflow.

The TYPE specifies which of the following actions is being taken for this step.

Currently, copying and tagging are supported only on S3.

For file location, you specify either the S3 bucket and key, or the EFS filesystem ID and path.

", + "CreateWorkflowRequest$OnExceptionSteps": "

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

For custom steps, the Lambda function needs to send FAILURE to the callback API to kick off the exception steps. Additionally, if the Lambda function does not send SUCCESS before it times out, the exception steps are executed.
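Putting the step and exception-step shapes together, a hedged sketch of a workflow with a copy step, a delete step, and a clean-up step that runs only on failure; it reuses the transfer client from the earlier sketch, and the bucket and step names are placeholders.

```go
_, err := svc.CreateWorkflow(&transfer.CreateWorkflowInput{
	Description: aws.String("Archive each upload, then delete the original"),
	Steps: []*transfer.WorkflowStep{
		{
			Type: aws.String("COPY"),
			CopyStepDetails: &transfer.CopyStepDetails{
				Name: aws.String("CopyToArchive"),
				DestinationFileLocation: &transfer.InputFileLocation{
					S3FileLocation: &transfer.S3InputFileLocation{
						Bucket: aws.String("my-archive-bucket"),
						Key:    aws.String("archive/"), // trailing "/" keeps the file name
					},
				},
				OverwriteExisting: aws.String("TRUE"),
			},
		},
		{
			Type:              aws.String("DELETE"),
			DeleteStepDetails: &transfer.DeleteStepDetails{Name: aws.String("DeleteOriginal")},
		},
	},
	// Runs only if a step fails, or if a custom step reports FAILURE or times out.
	OnExceptionSteps: []*transfer.WorkflowStep{
		{
			Type:              aws.String("DELETE"),
			DeleteStepDetails: &transfer.DeleteStepDetails{Name: aws.String("CleanUpPartialFile")},
		},
	},
})
if err != nil {
	log.Fatal(err)
}
```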

", "DescribedWorkflow$Steps": "

Specifies the details for the steps that are in the specified workflow.

", - "DescribedWorkflow$OnExceptionSteps": "

Specifies the steps (actions) to take if any errors are encountered during execution of the workflow.

" + "DescribedWorkflow$OnExceptionSteps": "

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

" } } } diff --git a/service/imagebuilder/api.go b/service/imagebuilder/api.go index c0e36ba4cf7..18498048734 100644 --- a/service/imagebuilder/api.go +++ b/service/imagebuilder/api.go @@ -2841,7 +2841,7 @@ func (c *Imagebuilder) ListComponentBuildVersionsRequest(input *ListComponentBui // can assign values for the first three, and can filter on all of them. // // Filtering: With semantic versioning, you have the flexibility to use wildcards -// (x) to specify the most recent versions or nodes when selecting the source +// (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. // @@ -3006,7 +3006,7 @@ func (c *Imagebuilder) ListComponentsRequest(input *ListComponentsInput) (req *r // can assign values for the first three, and can filter on all of them. // // Filtering: With semantic versioning, you have the flexibility to use wildcards -// (x) to specify the most recent versions or nodes when selecting the source +// (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. // @@ -5670,7 +5670,7 @@ type AdditionalInstanceConfiguration struct { // The userDataOverride property replaces any commands that Image Builder might // have added to ensure that Systems Manager is installed on your Linux build // instance. If you override the user data, make sure that you add commands - // to install Systems Manager, if it is not pre-installed on your source image. + // to install Systems Manager, if it is not pre-installed on your base image. UserDataOverride *string `locationName:"userDataOverride" min:"1" type:"string"` } @@ -5813,7 +5813,7 @@ type AmiDistributionConfiguration struct { // can use the AMI to launch instances. LaunchPermission *LaunchPermissionConfiguration `locationName:"launchPermission" type:"structure"` - // The name of the distribution configuration. + // The name of the output AMI. Name *string `locationName:"name" min:"1" type:"string"` // The ID of an account to which you want to distribute an image. @@ -6184,7 +6184,7 @@ type Component struct { State *ComponentState `locationName:"state" type:"structure"` // The operating system (OS) version supported by the component. If the OS information - // is available, a prefix match is performed against the parent image OS version + // is available, a prefix match is performed against the base image OS version // during image recipe creation. SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` @@ -6583,7 +6583,7 @@ type ComponentSummary struct { State *ComponentState `locationName:"state" type:"structure"` // The operating system (OS) version supported by the component. If the OS information - // is available, a prefix match is performed against the parent image OS version + // is available, a prefix match is performed against the base image OS version // during image recipe creation. SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` @@ -6724,7 +6724,7 @@ type ComponentVersion struct { Platform *string `locationName:"platform" type:"string" enum:"Platform"` // he operating system (OS) version supported by the component. 
If the OS information - // is available, a prefix match is performed against the parent image OS version + // is available, a prefix match is performed against the base image OS version // during image recipe creation. SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` @@ -6747,7 +6747,7 @@ type ComponentVersion struct { // a software version pattern, such as 1.0.0, or a date, such as 2021.01.01. // // Filtering: With semantic versioning, you have the flexibility to use wildcards - // (x) to specify the most recent versions or nodes when selecting the source + // (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. Version *string `locationName:"version" type:"string"` @@ -6996,7 +6996,7 @@ type ContainerRecipe struct { // The owner of the container recipe. Owner *string `locationName:"owner" min:"1" type:"string"` - // The source image for the container recipe. + // The base image for the container recipe. ParentImage *string `locationName:"parentImage" min:"1" type:"string"` // The system platform for the container, such as Windows or Linux. @@ -7023,7 +7023,7 @@ type ContainerRecipe struct { // a software version pattern, such as 1.0.0, or a date, such as 2021.01.01. // // Filtering: With semantic versioning, you have the flexibility to use wildcards - // (x) to specify the most recent versions or nodes when selecting the source + // (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. Version *string `locationName:"version" type:"string"` @@ -7171,7 +7171,7 @@ type ContainerRecipeSummary struct { // The owner of the container recipe. Owner *string `locationName:"owner" min:"1" type:"string"` - // The source image for the container recipe. + // The base image for the container recipe. ParentImage *string `locationName:"parentImage" min:"1" type:"string"` // The system platform for the container, such as Windows or Linux. @@ -7297,7 +7297,7 @@ type CreateComponentInput struct { SemanticVersion *string `locationName:"semanticVersion" type:"string" required:"true"` // The operating system (OS) version supported by the component. If the OS information - // is available, a prefix match is performed against the parent image OS version + // is available, a prefix match is performed against the base image OS version // during image recipe creation. SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` @@ -7511,7 +7511,7 @@ type CreateContainerRecipeInput struct { // image. DockerfileTemplateUri *string `locationName:"dockerfileTemplateUri" type:"string"` - // Specifies the operating system version for the source image. + // Specifies the operating system version for the base image. ImageOsVersionOverride *string `locationName:"imageOsVersionOverride" min:"1" type:"string"` // A group of options that can be used to configure an instance for building @@ -7526,12 +7526,12 @@ type CreateContainerRecipeInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // The source image for the container recipe. + // The base image for the container recipe. 
// // ParentImage is a required field ParentImage *string `locationName:"parentImage" min:"1" type:"string" required:"true"` - // Specifies the operating system platform when you use a custom source image. + // Specifies the operating system platform when you use a custom base image. PlatformOverride *string `locationName:"platformOverride" type:"string" enum:"Platform"` // The semantic version of the container recipe. This version follows the semantic @@ -8386,9 +8386,9 @@ type CreateImageRecipeInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // The parent image of the image recipe. The value of the string can be the - // ARN of the parent image or an AMI ID. The format for the ARN follows this - // example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x. + // The base image of the image recipe. The value of the string can be the ARN + // of the base image or an AMI ID. The format for the ARN follows this example: + // arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x. // You can provide the specific version that you want to use, or you can use // a wildcard in all of the fields. If you enter an AMI ID for the string value, // you must have access to the AMI, and the AMI must be in the same Region in @@ -8634,15 +8634,9 @@ type CreateInfrastructureConfigurationInput struct { // InstanceProfileName is a required field InstanceProfileName *string `locationName:"instanceProfileName" min:"1" type:"string" required:"true"` - // The instance metadata options that you can set for the HTTP requests that - // pipeline builds use to launch EC2 build and test instances. For more information - // about instance metadata options, see one of the following links: - // - // * Configure the instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html) - // in the Amazon EC2 User Guide for Linux instances. - // - // * Configure the instance metadata options (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/configuring-instance-metadata-options.html) - // in the Amazon EC2 Windows Guide for Windows instances. + // The instance types of the infrastructure configuration. You can specify one + // or more instance types to use for this build. The service will pick one of + // these instance types based on availability. InstanceTypes []*string `locationName:"instanceTypes" type:"list"` // The key pair of the infrastructure configuration. You can use this to log @@ -11130,7 +11124,7 @@ type Image struct { // a software version pattern, such as 1.0.0, or a date, such as 2021.01.01. // // Filtering: With semantic versioning, you have the flexibility to use wildcards - // (x) to specify the most recent versions or nodes when selecting the source + // (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. Version *string `locationName:"version" type:"string"` @@ -11517,7 +11511,7 @@ type ImageRecipe struct { // The owner of the image recipe. Owner *string `locationName:"owner" min:"1" type:"string"` - // The parent image of the image recipe. + // The base image of the image recipe. ParentImage *string `locationName:"parentImage" min:"1" type:"string"` // The platform of the image recipe. 
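
The wildcard filtering and base-image wording in the comments above translate directly into a CreateImageRecipe call. Below is a minimal sketch using this SDK; the recipe name, client token, Region, and component ARN are illustrative placeholders, and the trailing x.x.x selects the most recent matching version as the filtering rules describe.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/imagebuilder"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := imagebuilder.New(sess)

	// The base image (ParentImage) is referenced by ARN; the trailing x.x.x
	// wildcard picks up the most recent version, per the filtering rules above.
	out, err := svc.CreateImageRecipe(&imagebuilder.CreateImageRecipeInput{
		ClientToken:     aws.String("example-recipe-token-001"), // idempotency token (placeholder)
		Name:            aws.String("example-recipe"),           // placeholder recipe name
		SemanticVersion: aws.String("1.0.0"),
		ParentImage:     aws.String("arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x"),
		Components: []*imagebuilder.ComponentConfiguration{
			{
				// Wildcard component version: all nodes to the right of the
				// first x must also be x. ARN shown for illustration only.
				ComponentArn: aws.String("arn:aws:imagebuilder:us-west-2:aws:component/amazon-cloudwatch-agent-windows/x.x.x"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ImageRecipeArn))
}
```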
@@ -11655,7 +11649,7 @@ type ImageRecipeSummary struct { // The owner of the image recipe. Owner *string `locationName:"owner" min:"1" type:"string"` - // The parent image of the image recipe. + // The base image of the image recipe. ParentImage *string `locationName:"parentImage" min:"1" type:"string"` // The platform of the image recipe. @@ -11998,7 +11992,7 @@ type ImageVersion struct { // a software version pattern, such as 1.0.0, or a date, such as 2021.01.01. // // Filtering: With semantic versioning, you have the flexibility to use wildcards - // (x) to specify the most recent versions or nodes when selecting the source + // (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. Version *string `locationName:"version" type:"string"` @@ -12113,7 +12107,7 @@ type ImportComponentInput struct { // can assign values for the first three, and can filter on all of them. // // Filtering: With semantic versioning, you have the flexibility to use wildcards - // (x) to specify the most recent versions or nodes when selecting the source + // (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. // @@ -12599,7 +12593,7 @@ type InstanceBlockDeviceMapping struct { // Use to manage Amazon EBS-specific configuration for this mapping. Ebs *EbsInstanceBlockDeviceSpecification `locationName:"ebs" type:"structure"` - // Use to remove a mapping from the parent image. + // Use to remove a mapping from the base image. NoDevice *string `locationName:"noDevice" type:"string"` // Use to manage instance ephemeral devices. @@ -12669,8 +12663,8 @@ func (s *InstanceBlockDeviceMapping) SetVirtualName(v string) *InstanceBlockDevi return s } -// Defines a custom source AMI and block device mapping configurations of an -// instance used for building and testing container images. +// Defines a custom base AMI and block device mapping configurations of an instance +// used for building and testing container images. type InstanceConfiguration struct { _ struct{} `type:"structure"` @@ -14732,7 +14726,7 @@ type ListImagesOutput struct { // can assign values for the first three, and can filter on all of them. // // Filtering: With semantic versioning, you have the flexibility to use wildcards - // (x) to specify the most recent versions or nodes when selecting the source + // (x) to specify the most recent versions or nodes when selecting the base // image or components for your recipe. When you use a wildcard in any node, // all nodes to the right of the first wildcard must also be wildcards. ImageVersionList []*ImageVersion `locationName:"imageVersionList" type:"list"` @@ -15823,12 +15817,12 @@ type Schedule struct { // The condition configures when the pipeline should trigger a new image build. // When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, - // and you use semantic version filters on the source image or components in - // your image recipe, EC2 Image Builder will build a new image only when there - // are new versions of the image or components in your recipe that match the - // semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will - // build a new image every time the CRON expression matches the current time. 
- // For semantic version syntax, see CreateComponent (https://docs.aws.amazon.com/imagebuilder/latest/APIReference/API_CreateComponent.html) + // and you use semantic version filters on the base image or components in your + // image recipe, EC2 Image Builder will build a new image only when there are + // new versions of the image or components in your recipe that match the semantic + // version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a + // new image every time the CRON expression matches the current time. For semantic + // version syntax, see CreateComponent (https://docs.aws.amazon.com/imagebuilder/latest/APIReference/API_CreateComponent.html) // in the EC2 Image Builder API Reference. PipelineExecutionStartCondition *string `locationName:"pipelineExecutionStartCondition" type:"string" enum:"PipelineExecutionStartCondition"` diff --git a/service/transfer/api.go b/service/transfer/api.go index 1a983ddbbe1..7b2bc741431 100644 --- a/service/transfer/api.go +++ b/service/transfer/api.go @@ -3779,8 +3779,7 @@ type CreateAccessInput struct { // // The following is an Entry and Target pair example. // - // [ { "Entry": "your-personal-report.pdf", "Target": "/bucket3/customized-reports/${transfer:UserName}.pdf" - // } ] + // [ { "Entry": "/directory1", "Target": "/bucket_name/home/mydirectory" } ] // // In most cases, you can use this value instead of the session policy to lock // down your user to the designated home directory ("chroot"). To do this, you @@ -4343,8 +4342,7 @@ type CreateUserInput struct { // // The following is an Entry and Target pair example. // - // [ { "Entry": "your-personal-report.pdf", "Target": "/bucket3/customized-reports/${transfer:UserName}.pdf" - // } ] + // [ { "Entry": "/directory1", "Target": "/bucket_name/home/mydirectory" } ] // // In most cases, you can use this value instead of the session policy to lock // your user down to the designated home directory ("chroot"). To do this, you @@ -4617,8 +4615,12 @@ type CreateWorkflowInput struct { // A textual description for the workflow. Description *string `type:"string"` - // Specifies the steps (actions) to take if any errors are encountered during - // execution of the workflow. + // Specifies the steps (actions) to take if errors are encountered during execution + // of the workflow. + // + // For custom steps, the lambda function needs to send FAILURE to the call back + // API to kick off the exception steps. Additionally, if the lambda does not + // send SUCCESS before it times out, the exception steps are executed. OnExceptionSteps []*WorkflowStep `type:"list"` // Specifies the details for the steps that are in the specified workflow. @@ -4634,6 +4636,8 @@ type CreateWorkflowInput struct { // // * Tag: add a tag to the file // + // Currently, copying and tagging are supported only on S3. + // // For file location, you specify either the S3 bucket and key, or the EFS filesystem // ID and path. // @@ -5108,7 +5112,7 @@ func (s DeleteSshPublicKeyOutput) GoString() string { return s.String() } -// The name of the step, used to identify the step that is being deleted. +// The name of the step, used to identify the delete step. type DeleteStepDetails struct { _ struct{} `type:"structure"` @@ -6604,8 +6608,8 @@ type DescribedWorkflow struct { // Specifies the text description for the workflow. Description *string `type:"string"` - // Specifies the steps (actions) to take if any errors are encountered during - // execution of the workflow. 
+ // Specifies the steps (actions) to take if errors are encountered during execution + // of the workflow. OnExceptionSteps []*WorkflowStep `type:"list"` // Specifies the details for the steps that are in the specified workflow. @@ -6673,21 +6677,7 @@ func (s *DescribedWorkflow) SetWorkflowId(v string) *DescribedWorkflow { return s } -// Specifies the details for the file location for the file being used in the -// workflow. Only applicable if you are using Amazon EFS for storage. -// -// You need to provide the file system ID and the pathname. The pathname can -// represent either a path or a file. This is determined by whether or not you -// end the path value with the forward slash (/) character. If the final character -// is "/", then your file is copied to the folder, and its name does not change. -// If, rather, the final character is alphanumeric, your uploaded file is renamed -// to the path value. In this case, if a file with that name already exists, -// it is overwritten. -// -// For example, if your path is shared-files/bob/, your uploaded files are copied -// to the shared-files/bob/, folder. If your path is shared-files/today, each -// uploaded file is copied to the shared-files folder and named today: each -// upload overwrites the previous version of the bob file. +// Reserved for future use. type EfsFileLocation struct { _ struct{} `type:"structure"` @@ -6909,8 +6899,8 @@ func (s *ExecutionError) SetType(v string) *ExecutionError { type ExecutionResults struct { _ struct{} `type:"structure"` - // Specifies the steps (actions) to take if any errors are encountered during - // execution of the workflow. + // Specifies the steps (actions) to take if errors are encountered during execution + // of the workflow. OnExceptionSteps []*ExecutionStepResult `min:"1" type:"list"` // Specifies the details for the steps that are in the specified workflow. @@ -7335,7 +7325,7 @@ func (s *ImportSshPublicKeyOutput) SetUserName(v string) *ImportSshPublicKeyOutp type InputFileLocation struct { _ struct{} `type:"structure"` - // Specifies the details for the Amazon EFS file being copied. + // Reserved for future use. EfsFileLocation *EfsFileLocation `type:"structure"` // Specifies the details for the S3 file being copied. @@ -9179,18 +9169,6 @@ func (s *ResourceNotFoundException) RequestID() string { // Specifies the details for the file location for the file being used in the // workflow. Only applicable if you are using S3 storage. -// -// You need to provide the bucket and key. The key can represent either a path -// or a file. This is determined by whether or not you end the key value with -// the forward slash (/) character. If the final character is "/", then your -// file is copied to the folder, and its name does not change. If, rather, the -// final character is alphanumeric, your uploaded file is renamed to the path -// value. In this case, if a file with that name already exists, it is overwritten. -// -// For example, if your path is shared-files/bob/, your uploaded files are copied -// to the shared-files/bob/, folder. If your path is shared-files/today, each -// uploaded file is copied to the shared-files folder and named today: each -// upload overwrites the previous version of the bob file. type S3FileLocation struct { _ struct{} `type:"structure"` @@ -9251,11 +9229,24 @@ func (s *S3FileLocation) SetVersionId(v string) *S3FileLocation { return s } -// Specifies the details for the S3 file being copied. +// Specifies the customer input S3 file location. 
If it is used inside copyStepDetails.DestinationFileLocation, +// it should be the S3 copy destination. +// +// You need to provide the bucket and key. The key can represent either a path +// or a file. This is determined by whether or not you end the key value with +// the forward slash (/) character. If the final character is "/", then your +// file is copied to the folder, and its name does not change. If, rather, the +// final character is alphanumeric, your uploaded file is renamed to the path +// value. In this case, if a file with that name already exists, it is overwritten. +// +// For example, if your path is shared-files/bob/, your uploaded files are copied +// to the shared-files/bob/, folder. If your path is shared-files/today, each +// uploaded file is copied to the shared-files folder and named today: each +// upload overwrites the previous version of the bob file. type S3InputFileLocation struct { _ struct{} `type:"structure"` - // Specifies the S3 bucket that contains the file being copied. + // Specifies the S3 bucket for the customer input file. Bucket *string `min:"3" type:"string"` // The name assigned to the file when it was created in S3. You use the object @@ -10396,8 +10387,7 @@ type UpdateAccessInput struct { // // The following is an Entry and Target pair example. // - // [ { "Entry": "your-personal-report.pdf", "Target": "/bucket3/customized-reports/${transfer:UserName}.pdf" - // } ] + // [ { "Entry": "/directory1", "Target": "/bucket_name/home/mydirectory" } ] // // In most cases, you can use this value instead of the session policy to lock // down your user to the designated home directory ("chroot"). To do this, you @@ -10923,8 +10913,7 @@ type UpdateUserInput struct { // // The following is an Entry and Target pair example. // - // [ { "Entry": "your-personal-report.pdf", "Target": "/bucket3/customized-reports/${transfer:UserName}.pdf" - // } ] + // [ { "Entry": "/directory1", "Target": "/bucket_name/home/mydirectory" } ] // // In most cases, you can use this value instead of the session policy to lock // down your user to the designated home directory ("chroot"). To do this, you @@ -11352,7 +11341,7 @@ type WorkflowStep struct { // // * A description // - // * An S3 or EFS location for the destination of the file copy. + // * An S3 location for the destination of the file copy. // // * A flag that indicates whether or not to overwrite an existing file of // the same name. The default is FALSE. @@ -11363,7 +11352,7 @@ type WorkflowStep struct { // Consists of the lambda function name, target, and timeout (in seconds). CustomStepDetails *CustomStepDetails `type:"structure"` - // You need to specify the name of the file to be deleted. + // Details for a step that deletes the file. DeleteStepDetails *DeleteStepDetails `type:"structure"` // Details for a step that creates one or more tags.
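
The Transfer Family changes in this diff document the managed workflows feature: copy and tag steps (currently supported only on S3), delete steps, custom steps, and the OnExceptionSteps path that runs when a step fails or a custom step's Lambda does not report SUCCESS before timing out. The sketch below shows how those pieces fit together in a CreateWorkflow call; the bucket, step names, and Region are placeholder values.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := transfer.New(sess)

	out, err := svc.CreateWorkflow(&transfer.CreateWorkflowInput{
		Description: aws.String("Copy and tag uploads; clean up on failure"),
		Steps: []*transfer.WorkflowStep{
			{
				// Copy and tag steps are currently supported only on S3.
				Type: aws.String("COPY"),
				CopyStepDetails: &transfer.CopyStepDetails{
					Name: aws.String("copy-to-archive"), // placeholder step name
					DestinationFileLocation: &transfer.InputFileLocation{
						// The S3 location here is the copy destination; a trailing "/"
						// in the key keeps the uploaded file's original name.
						S3FileLocation: &transfer.S3InputFileLocation{
							Bucket: aws.String("example-archive-bucket"), // placeholder bucket
							Key:    aws.String("archive/"),
						},
					},
					OverwriteExisting: aws.String("FALSE"),
				},
			},
			{
				Type: aws.String("TAG"),
				TagStepDetails: &transfer.TagStepDetails{
					Name: aws.String("tag-upload"),
					Tags: []*transfer.S3Tag{
						{Key: aws.String("scanned"), Value: aws.String("true")},
					},
				},
			},
		},
		// Exception steps run if a step fails, or if a custom step's Lambda
		// does not report SUCCESS to the callback API before it times out.
		OnExceptionSteps: []*transfer.WorkflowStep{
			{
				Type:              aws.String("DELETE"),
				DeleteStepDetails: &transfer.DeleteStepDetails{Name: aws.String("delete-on-error")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.WorkflowId))
}
```

The resulting workflow ID is what gets attached to a server's workflow details so the steps run after each file upload.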
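
The updated Entry and Target pair example, [ { "Entry": "/directory1", "Target": "/bucket_name/home/mydirectory" } ], corresponds to the HomeDirectoryMappings field when creating or updating a user or access. A short sketch against CreateUser follows; the Region, server ID, role ARN, and user name are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := transfer.New(sess)

	// With a LOGICAL home directory, "/directory1" is the path the user sees,
	// while files actually live under the mapped S3 target.
	out, err := svc.CreateUser(&transfer.CreateUserInput{
		ServerId:          aws.String("s-0123456789abcdef0"),                                   // placeholder server ID
		UserName:          aws.String("exampleuser"),                                           // placeholder user name
		Role:              aws.String("arn:aws:iam::111122223333:role/transfer-access-role"),   // placeholder role ARN
		HomeDirectoryType: aws.String("LOGICAL"),
		HomeDirectoryMappings: []*transfer.HomeDirectoryMapEntry{
			{
				Entry:  aws.String("/directory1"),
				Target: aws.String("/bucket_name/home/mydirectory"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.UserName))
}
```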