diff --git a/.gitignore b/.gitignore index 1917cf5f0..9fca97731 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ .vscode +.idea .pyc .zip .DS_Store @@ -134,4 +135,4 @@ venv.bak/ dmypy.json # Pyre type checker -.pyre/ \ No newline at end of file +.pyre/ diff --git a/docs/providers-guide.md b/docs/providers-guide.md new file mode 100644 index 000000000..3031dbb59 --- /dev/null +++ b/docs/providers-guide.md @@ -0,0 +1,448 @@ +# Providers Guide + +Provider types and their properties can be defined as default config for a +pipeline. But also at the stage level of a pipeline to structure the source, +build, test, approval, deploy or invoke actions. + +Provider types and properties defined in the stage of a pipeline override the +default type that was defined for that pipeline. +Provider types are the basic building blocks of the ADF pipeline creation +process and allow for flexibility and abstraction over AWS CodePipeline +Providers and Actions. + +## Index + +- [Source](#source) + - [CodeCommit](#codecommit) + - [GitHub](#github) + - [S3](#s3) +- [Build](#build) + - [CodeBuild](#codebuild) + - [Jenkins](#jenkins) +- [Deploy](#deploy) + - [Approval](#approval) + - [CodeBuild](#codebuild-1) + - [CodeDeploy](#codedeploy) + - [CloudFormation](#cloudformation) + - [Lambda](#lambda) + - [Service Catalog](#service-catalog) + - [S3](#s3-1) + +## Source + +```yaml +default_providers: + source: + provider: codecommit|github|s3 + properties: + # All provider specific properties go here. +``` + +### CodeCommit + +Use CodeCommit as a source to trigger your pipeline. +The repository can also be hosted in another account. + +Provider type: `codecommit`. + +#### Properties + +- *account_id* - *(String)* **(required)** + > The AWS Account ID where the Source Repository is located, if the + > repository does not exist it will be created via AWS CloudFormation on the + > source account along with the associated cross account CloudWatch event + > action to trigger the pipeline. +- *repository* - *(String)* defaults to name of the pipeline. + > The AWS CodeCommit repository name. +- *branch* - *(String)* default: `master`. + > The Branch on the CodeCommit repository to use to trigger this specific + > pipeline. +- *poll_for_changes* - *(Boolean)* default: `False`. + > If CodePipeline should poll the repository for changes, defaults to + > False in favor of Amazon EventBridge events. + > + > As the name implies, when polling for changes it will check the + > repository for updates every minute or so. This will show up as actions in + > CloudTrail. + > + > By default, it will not poll for changes but use the event triggered by + > CodeCommit when an update to the repository took place instead. +- *owner* - *(String)* default: `AWS`. + > Can be either `AWS` *(default)*, `ThirdParty`, or `Custom`. + > Further information on the use of the owner attribute can be found in the + > [CodePipeline documentation](https://docs.aws.amazon.com/codepipeline/latest/APIReference/API_ActionTypeId.html). +- *role* - *(String)* default ADF managed role. + > The role to use to fetch the contents of the CodeCommit repository. + > Only specify when you need a specific role to access it. By default ADF + > will use its own role to access it instead. + +### GitHub + +Use GitHub as a source to trigger your pipeline. +The repository can also be hosted in another account. + +Provider type: `github`. + +#### Properties + +- *repository* - *(String)* defaults to name of the pipeline. + > The GitHub repository name. 
+ > For example, for the ADF repository it would be: + > `aws-deployment-framework`. +- *branch* - *(String)* - default: `master`. + > The Branch on the GitHub repository to use to trigger this specific + > pipeline. +- *owner* - *(String)* **(required)** + > The name of the GitHub user or organization who owns the GitHub repository. + > For example, for the ADF repository that would be: `awslabs`. +- *oauth_token_path* - *(String)* **(required)** + > The OAuth token path in AWS Secrets Manager on the Deployment Account that + > holds the GitHub OAuth token used to create the web hook as part of the + > pipeline. Read the CodePipeline documentation for more + > [information on configuring GitHub OAuth](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference-GitHub.html#action-reference-GitHub-auth). +- *json_field* - *(String)* **(required)** + > The name of the JSON key in the object that is stored in AWS Secrets + > Manager that holds the OAuth Token. + +### S3 + +S3 can use used as the source for a pipeline too. + +Please note: you can use S3 as a source and deployment provider. The properties +that are available are slightly different. + +The role used to fetch the object from the S3 bucket is: +`arn:aws:iam::${source_account_id}:role/adf-codecommit-role`. + +Provider type: `s3`. + +#### Properties + +- *account_id* - *(String)* **(required)** + > The AWS Account ID where the source S3 Bucket is located. +- *bucket_name* - *(String)* **(required)** + > The Name of the S3 Bucket that will be the source of the pipeline. +- *object_key* - *(String)* **(required)** + > The Specific Object within the bucket that will trigger the pipeline + > execution. + +## Build + +```yaml +default_providers: + build: + provider: codebuild|jenkins + # Optional: enabled. + # The build stage is enabled by default. + # If you wish to disable the build stage within a pipeline, set it to + # False instead, like this: + enabled: False + properties: + # All provider specific properties go here. +``` + +### CodeBuild + +CodeBuild is the default Build provider. +It will be provided the assets as produced by the source provider. +At the end of the CodeBuild execution, output assets can be configured +such that these can be deployed in the deployment phase. + +CodeBuild can also be configured as a deployment provider. +For more information on this, scroll down to [Deploy / CodeBuild](#codebuild-1). +In terms of the properties, the following properties will be usable for running +CodeBuild as a Build and Deploy provider. + +Provider type: `codebuild`. + +#### Properties + +- *image* *(String)* - default: `UBUNTU_14_04_PYTHON_3_7_1`. + > The Image that the AWS CodeBuild will use. + > Images can be found [here](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-codebuild.LinuxBuildImage.html). + > + > Image can also take an object that contains a property key of + > `repository_arn` which is the repository ARN of an ECR repository on the + > deployment account within the main deployment region. This allows your + > pipeline to consume a custom image if required. + > Along with `repository_arn`, we also support a `tag` key which can be used + > to define which image should be used (defaults to `latest`). +- *size* *(String)* **(small|medium|large)** - default: `small`. + > The Compute type to use for the build, types can be found + > [here](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). +- *environment_variables* *(Object)* defaults to empty object. 
+ > Any Environment Variables you wish to be available within the build stage + > for this pipeline. These are to be passed in as Key/Value pairs. + > + > For example: + ```yaml + environment_variables: + MY_ENV_VAR: some value + ANOTHER_ENV_VAR: another value + ``` +- *role* *(String)* default: `adf-codebuild-role`. + > If you wish to pass a custom IAM Role to use for the Build stage of this + > pipeline. Alternatively, you can change the `adf-codebuild-role` with + > additional permissions and conditions in the `global-iam.yml` file as + > documented in the [User Guide](./user-guide.md). +- *timeout* *(Number)* in minutes, default: `20`. + > If you wish to define a custom timeout for the Build stage. +- *privileged* *(Boolean)* default: `False`. + > If you plan to use this build project to build Docker images and the + > specified build environment is not provided by CodeBuild with Docker + > support, set Privileged to `True`. + > Otherwise, all associated builds that attempt to interact with the + > Docker daemon fail. +- *spec_inline* *(String)* defaults to use the Buildspec file instead. + > If you wish to pass in a custom inline Buildspec as a string for the + > CodeBuild Project this would override any `buildspec.yml` file. + > Read more [here](https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-example). +- *spec_filename* *(String)* default: `buildspec.yml`. + > If you wish to pass in a custom Buildspec file that is within the + > repository. This is useful for custom deploy type actions where CodeBuild + > will perform the execution of the commands. Path is relational to the + > root of the repository, so `build/buidlspec.yml` refers to the + > `buildspec.yml` stored in the `build` directory of the repository. + +### Jenkins + +Jenkins can be configured as the build provider, where it will be triggered +as part of the CodePipeline deployed by ADF. + +To use Jenkins as a Build provider, you will need to install the +[Jenkins Plugin as documented here](https://wiki.jenkins.io/display/JENKINS/AWS+CodePipeline+Plugin). + +Provider type: `jenkins`. + +#### Properties + +- *project_name* *(String)* **(required)** + > The Project name in Jenkins used for this Build. +- *server_url* *(String)* **(required)** + > The Server URL of your Jenkins Instance. +- *provider_name* *(String)* **(required)** + > The Provider name that was setup in the Jenkins Plugin for AWS CodePipeline. + +## Deploy + +```yaml +default_providers: + deploy: + provider: cloudformation|codedeploy|s3|service_catalog|codebuild|lambda + properties: + # All provider specific properties go here. +``` + +### Approval + +The approval provider enables you to await further execution until a key +decision maker (either person or automated process) approved +continuation of the deployment. + +```yaml + provider: approval + properties: + # All provider specific properties go here. +``` + +#### Properties + +- *message* *(String)* - default: `Approval stage for ${pipeline_name}`. + > The message you would like to include as part of the approval stage. +- *notification_endpoint* *(String)* + > An email or slack channel (see [User Guide docs](./user-guide.md)) that you + > would like to send the notification to. +- *sns_topic_arn* *(String)* - default is no additional SNS notification. + > A SNS Topic ARN you would like to receive a notification as part of the + > approval stage. + +### CodeBuild + +CodeBuild can also be configured as a deployment provider. 
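+
+For example, a deployment map stage could use CodeBuild to run integration
+tests or a deployment script after an earlier stage has deployed. A minimal,
+illustrative sketch (the OU path and spec file name are placeholders):
+
+```yaml
+targets:
+  - /banking/testing
+  - provider: codebuild
+    properties:
+      spec_filename: testspec.yml
+```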
+ +However, it cannot be used to target specific accounts or regions. +When you specify a CodeBuild deployment step, the step should not target +multiple accounts or regions. + +As the CodeBuild tasks will run inside the deployment account only. +Using the CodeBuild as a deployment step enables you to run integration tests +or deploy using CLI tools instead. + +When CodeBuild is also configured as the build provider, it is useful to +specify a different `spec_filename` like `'deployspec.yml'` or +`'testspec.yml'`. + +In case you would like to use CodeBuild to target specific accounts or regions, +you will need to make use of the environment variables to pass in the relevant +target information, while keeping the logic to assume into the correct +role, region and account in the Buildspec specification file as configured +by the `spec_filename` property. + +Provider type: `codebuild`. + +#### Properties + +See [Build / CodeBuild properties](#codebuild) above. + +### CodeDeploy + +Provider type: `codedeploy`. + +#### Properties + +- *application_name* *(String)* **(required)** + > The name of the CodeDeploy Application you want to use for this deployment. +- *deployment_group_name* *(String)* **(required)** + > The name of the Deployment Group you want to use for this deployment. +- *role* - *(String)* default `arn:aws:iam::${target_account_id}:role/adf-cloudformation-role`. + > The role you would like to use on the target AWS account to execute the + > CodeDeploy action. The role should allow the CodeDeploy service to assume + > it. As is [documented in the CodeDeploy service role documentation](https://docs.aws.amazon.com/codedeploy/latest/userguide/getting-started-create-service-role.html). + +### CloudFormation + +Useful to deploy CloudFormation templates using a specific or ADF generated +IAM Role in the target environment. + +When you are using CDK, you can synthesize the CDK code into a CloudFormation +template and target that in this stage to get it deployed. This will ensure +that the code is compiled with least privileges and can only be deployed using +the specific CloudFormation role in the target environment. + +CloudFormation is the default action for deployments. + +It will fetch the template to deploy from the previous stage its output +artifacts. If you are specific on which files to include in the output +artifacts be sure to include the `params/*.json` files and the CloudFormation +template that you wish to deploy. + +Provider type: `cloudformation`. + +#### Properties + +- *stack_name* - *(String)* default: `${ADF_STACK_PREFIX}${PIPELINE_NAME}`. + > The name of the CloudFormation Stack to use. + > + > The default `ADF_STACK_PREFIX` is `adf-`. This is configurable as part of + > the `StackPrefix` parameter in the `deployment/global.yml` stack. + > If the pipeline name is `some-pipeline`, the CloudFormation stack would + > be named: `adf-some-pipeline` by default. Unless you overwrite the value + > using this property, in which case it will use the exact value as + > specified. + > + > By setting this to a specific value, you can adopt a stack that was created + > using CloudFormation before. It can also help to name the stack according + > to the internal naming convention at your organization. +- *template_filename* - *(String)* default: `template.yml`. + > The name of the CloudFormation Template file to use. 
+ > Changing the template file name to use allows you to generate multiple + > templates, where a specific template is used according to its specific + > target environment. For example: `template_prod.yml` for production stages. +- *root_dir* - *(String)* default to empty string. + > The root directory in which the CloudFormation template and `params` + > directory reside. Example, when the CloudFormation template is stored in + > `infra/custom_template.yml` and parameter files in the + > `infra/params` directory, set `template_filename` to + > `'custom_template.yml'` and `root_dir` to `'infra'`. + > + > Defaults to empty string, the root of the source repository or input + > artifact. +- *role* - *(String)* default `arn:aws:iam::${target_account_id}:role/adf-cloudformation-deployment-role`. + > The role you would like to use on the target AWS account to execute the + > CloudFormation action. Ensure that the CloudFormation service should be + > allowed to assume that role. +- *action* - *(CHANGE_SET_EXECUTE|CHANGE_SET_REPLACE|CREATE_UPDATE|DELETE_ONLY|REPLACE_ON_FAILURE)* default: `CHANGE_SET_EXECUTE`. + > The CloudFormation action type you wish to use for this specific pipeline + > or stage. For more information on actions, see the + > [supported actions of CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-action-reference.html#w2ab1c13c13b9). +- *outputs* - *(String)* **(Required when using Parameter Overrides)** defaults to none. + > The outputs from the CloudFormation Stack creation. Required if you are + > using Parameter Overrides as part of the pipeline. +- *change_set_approval* - *(Boolean)* **(Stage Level Only)** + > If the stage should insert a manual approval stage between the creation of + > the change set and the execution of it. This is only possible when the + > target region to deploy to is in the same region as where the deployment + > pipelines reside. In other words, if the main region is set to `eu-west-1`, + > the `change_set_approval` can only be set on targets for `eu-west-1`. + > + > In case you would like to target other regions, split it in three stages + > instead. First stage, using `cloudformation` as the deployment provider, + > with `action` set to `'CHANGE_SET_REPLACE'`. This will create the Change + > Set, but not execute it. Add a `approval` stage next, and the default + > `cloudformation` stage after. The latter will create a new change set and + > execute it accordingly. +- *param_overrides* - *(List of Objects)* **(Stage Level Only)** defaults to none. + - *inputs* *(String)* + > The input artifact name you want to pass into this stage to take a + > parameter override from. + - *param* *(String)* + > The name of the CloudFormation Parameter you want to override in the + > specific stage. + - *key_name* *(String)* + > The key name from the stack output that you wish to use as the input + > in this stage. + +### Lambda + +Invoke a Lambda function as a deployment step. + +Only Lambda functions deployed in the deployment account can be invoked. +Lambda cannot be used to target other accounts or regions. + +Provider type: `lambda`. + +#### Properties + +- *function_name* *(String)* **(required)** + > The name of the Lambda function to invoke. + > + > For example: `myLambdaFunction`. +- *input* *(Object|List|String)* defaults to empty string. + > An object to pass into the Lambda function as its input event. + > This input will be object stringified. 
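+
+For example, a deployment map stage invoking a Lambda function could look like
+the following (illustrative only; the function name and input values are
+placeholders):
+
+```yaml
+targets:
+  - provider: lambda
+    properties:
+      function_name: myLambdaFunction
+      input:
+        environment: testing
+```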
+ +### Service Catalog + +Service Catalog deployment provider. + +The role used to deploy the service catalog is: +`arn:aws:iam::${target_account_id}:role/adf-cloudformation-role`. + +Provider type: `service_catalog`. + +#### Properties + +- *product_id* - *(String)* **(required)** + > The Product ID of the Service Catalog Product to deploy. +- *configuration_file_path* - *(String)* default: `params/${account-name}_${region}.json` + > If you wish to pass a custom path to the configuration file path. + +### S3 + +S3 can use used to deploy with too. + +S3 cannot be used to target multiple accounts or regions in one stage. +As the `bucket_name` property needs to be defined and these are globally +unique across all AWS accounts. In case you would like to deploy to multiple +accounts you will need to configure multiple stages in the pipeline manually +instead. Where each will target the specific bucket in the target account. + +Please note: you can use S3 as a source and deployment provider. The properties +that are available are slightly different. + +The role used to upload the object(s) to the S3 bucket is: +`arn:aws:iam::${target_account_id}:role/adf-cloudformation-role`. + +Provider type: `s3`. + +#### Properties + +- *bucket_name* - *(String)* **(required)** + > The name of the S3 Bucket to deploy to. +- *object_key* - *(String)* **(required)** + > The object key within the bucket to deploy to. +- *extract* - *(Boolean)* default: `False`. + > Whether CodePipeline should extract the contents of the object when + > it deploys it. +- *role* - *(String)* default: `arn:aws:iam::${target_account_id}:role/adf-cloudformation-role`. + > The role you would like to use for this action. diff --git a/docs/types-guide.md b/docs/types-guide.md deleted file mode 100644 index b24e594fb..000000000 --- a/docs/types-guide.md +++ /dev/null @@ -1,162 +0,0 @@ -# Types Guide - -Types can be defined at the top level or at a stage level of a pipeline to structure the source, build, test, approval, deploy or invoke actions. Types defined in the stage of a pipeline override a default type that was defined at a top level. Types are the basic building blocks of the ADF pipeline creation process and allow for flexibility and abstraction over AWS CodePipeline Providers and Actions. - -## Source - -```yaml -default_providers: - source: - provider: codecommit|github|s3 - properties: ... -``` - -#### Properties - -- **codecommit** - - account_id - *(String)* **(required)** - > The AWS Account ID where the Source Repository is located, if the repository does not exist it will be created via AWS CloudFormation on the source account along with the associated cross account CloudWatch event action to trigger the pipeline. - - repository - *(String)* - > The AWS CodeCommit repository name. defaults to the same name as the pipeline. - - branch - *(String)* - > The Branch on the CodeCommit repository to use to trigger this specific pipeline. Defaults to master. - - poll_for_changes - *(Boolean)* - > If CodePipeline should poll the repository for changes, defaults to false in favor of CloudWatch events. -- **github** - - repository - *(String)* **(required)** - > The GitHub repository name. - - owner - *(String)* **(required)** - > The Owner of the GitHub repository. - - oauth_token_path - *(String)* **(required)** - > The oauth token path in AWS Secrets Manager on the Deployment Account that holds the GitHub oAuth token used to create the Webhook as part of the pipeline. 
- - json_field - *(String)* **(required)** - > The name of the JSON key in the object that is stored in AWS Secrets Manager that holds the oAuth Token. - - branch - *(String)* - > The Branch on the GitHub repository to use to trigger this specific pipeline. Defaults to master. -- **s3** - - account_id - *(String)* **(required)** - > The AWS Account ID where the source S3 Bucket is located. - - bucket_name - *(String)* **(required)** - > The Name of the S3 Bucket that will be the source of the pipeline. - - object_key - *(String)* **(required)** - > The Specific Object within the bucket that will trigger the pipeline execution. - -## Build - -```yaml -default_providers: - build: - provider: codebuild|jenkins - enabled: False # If you wish to disable the build stage within a pipeline, defaults to True. - properties: ... -``` - -#### Properties - -- **codebuild** - - image *(String)* - > The Image that the AWS CodeBuild will use. Images can be found [here](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-codebuild.LinuxBuildImage.html). Defaults to UBUNTU_14_04_PYTHON_3_7_1. Image can also take an object that contains a property key of *repository_arn* which is the repository ARN of an ECR repository on the deloyment account within the main deployment region. This allows your pipeline to consume a custom image if required. Along with *repository_arn*, we also support a *tag* key which can be used to define which image should be used (defaults to *latest*). - - size *(String)* **(small|medium|large)** - > The Compute type to use for the build, types can be found [here](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html). Defaults to *small*. - - environment_variables *(Object)* - > Any Environment Variables you wish to be available within the build stage for this pipeline. These are to be passed in as Key/Value pairs. - - role *(String)* - > If you wish to pass a custom IAM Role to use for the Build stage of this pipeline. Defaults to *adf-codebuild-role*. - - timeout *(Number)* - > If you wish to define a custom timeout for the Build stage. Defaults to 20 minutes. - - privileged *(Boolean)* - > If you plan to use this build project to build Docker images and the specified build environment is not provided by CodeBuild with Docker support, set Privileged to True. Otherwise, all associated builds that attempt to interact with the Docker daemon fail. Defaults to False. - - spec_inline *(String)* - > If you wish to pass in a custom inline Buildspec as a string for the CodeBuild Project which would override any buildspec.yml file. Read more [here](https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-example). Defaults to None. - - spec_filename *(String)* - > If you wish to pass in a custom Buildspec file that is within the repository. This is useful for custom deploy type actions where CodeBuild will perform the execution of the commands. Defaults to the buildspec.yml within the repository. - -- **jenkins** ([Jenkins Plugin](https://wiki.jenkins.io/display/JENKINS/AWS+CodePipeline+Plugin)) - - project_name *(String)* **(required)** - > The Project name in Jenkins used for this Build. - - server_url *(String)* **(required)** - > The Server URL of your Jenkins Instance. - - provider_name *(String)* **(required)** - > The Provider name that was setup in the Jenkins Plugin for AWS CodePipeline. - -## Approval - -```yaml -provider: approval -properties: ... 
-``` - -#### Properties - -- **approval** - - message *(String)* - > The Message you would like to include as part of the Approval stage. - - notification_endpoint *(String)* - > An email or slack channel *(see docs)* that you would like to send the notification to. - - sns_topic_arn *(String)* - > Any SNS Topic ARN you would like to receive a notification as part of the Approval stage stage. - -## Deploy - -```yaml -default_providers: - deploy: - provider: cloudformation|codedeploy|s3|service_catalog|codebuild|lambda - properties: ... -``` - -#### Properties - -- **cloudformation** - - stack_name - *(String)* - > The name of the CloudFormation Stack. - - template_filename - *(String)* - > The name of the CloudFormation Template to execute. Defaults to template.yml. - - root_dir - *(String)* - > The root directory in which the CloudFormation template and params directory reside. Example, when the CloudFormation template is stored in 'infra/custom_template.yml' and parameter files in the 'infra/params' directory, set template_filename to 'custom_template.yml' and root_dir to 'infra'. Defaults to '' (empty string), root of source repository or input artifact. - - role - *(String)* - > The role you would like to use on the target AWS account to execute the CloudFormtion action. - - action - *(String)* - > The CloudFormation action type you wish to use for this specific pipeline or stage. For more information on actions, see [here](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-action-reference.html#w2ab1c13c13b9). - - outputs - *(String)* - > The outputs from the CloudFormation Stack creation. Required if you are using Parameter Overrides as part of the pipeline. - - change_set_approval - *(Boolean)* **(Stage Level Only)** - > If the stage should insert a manual approval stage between the creation of the change set and the execution of it. - - param_overrides - *(List of Objects)* **(Stage Level Only)** - - inputs *(String)* - > The input you want to pass into this stage to take a parameter override from. - - param *(String)* - > The name of the Parameter you want to override in the specific stage. - - key_name *(String)* - > The Key name from the stack output you wish to use as input in this stage. - - -- **codedeploy** - - application_name *(String)* **(required)** - > The name of the CodeDeploy Application you want to use for this deployment. - - deployment_group_name *(String)* **(required)** - > The name of the Deployment Group you want to use for this deployment. - - role - *(String)* - > The role you would like to use on the target AWS account to execute the CodeDeploy action. - -- **s3** - - bucket_name - *(String)* **(required)** - > The Name of the S3 Bucket that will be the source of the pipeline. - - object_key - *(String)* **(required)** - > The Specific Object within the bucket that will trigger the pipeline execution. - - extract - *(Boolean)* - > If CodePipeline should extract the contents of the Object when it deploys it. - - role - *(String)* - > The role you would like to use for this action. - -- **service_catalog** - - product_id - *(String)* **(required)** - > What is the Product ID of the Service Catalog Product to Deploy. - - configuration_file_path - *(String)* - > If you wish to pass a custom path to the configuration file path. Defaults to the account-name_region.json pattern used for CloudFormation Parameter files. 
- -- **lambda** - - function_name *(String)* **(required)** - > The name of the Lambda Function to invoke. - - input *(String)* - > An Object to pass into the Function as input. This input will be object stringified. diff --git a/docs/user-guide.md b/docs/user-guide.md index 05cd1d89b..e554a792a 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -1,13 +1,13 @@ # User Guide - [Deployment Map](#deployment-map) - - [Types](#types) + - [Providers](#providers) - [Targets Syntax](#targets-syntax) - [Params](#params) - [Repositories](#repositories) - [Completion Triggers](#completion-triggers) - [Additional Deployment Maps](#additional-deployment-maps) - - [Removing Pipelines](#serverless-transforms) + - [Removing Pipelines](#removing-pipelines) - [Deploying via Pipelines](#deploying-via-pipelines) - [BuildSpec](#buildspec) - [Parameters and Tagging](#cloudformation-parameters-and-tagging) @@ -20,11 +20,20 @@ ## Deployment Map -The deployment_map.yml file *(or [files](#additional-deployment-maps))* lives in the repository named *aws-deployment-framework-pipelines* on the Deployment Account. These files are the general pipeline definitions that are responsible for mapping the specific pipelines to their deployment targets along with their respective parameters. The [AWS CDK](https://docs.aws.amazon.com/cdk/latest/guide/home.html) will synthesize during the CodeBuild step within the *aws-deployment-framework-pipelines* pipeline. Prior to the CDK creating these pipeline templates, a input generation step will run to parse the deployment_map.yml files, it will then assume a readonly role on the master account in the Organization that will have access to resolve the accounts in the AWS Organizations OU's specified in the mapping file. It will return the account name and ID for each of the accounts and pass those values into the input files that will go on to be main CDK applications inputs. +The `deployment_map.yml` file *(or [files](#additional-deployment-maps))* lives in the repository named `aws-deployment-framework-pipelines` on the Deployment Account. These files are the general pipeline definitions that are responsible for mapping the specific pipelines to their deployment targets along with their respective parameters. The [AWS CDK](https://docs.aws.amazon.com/cdk/latest/guide/home.html) will synthesize during the CodeBuild step within the `aws-deployment-framework-pipelines` pipeline. Prior to the CDK creating these pipeline templates, a input generation step will run to parse the deployment_map.yml files, it will then assume a readonly role on the master account in the Organization that will have access to resolve the accounts in the AWS Organizations OU's specified in the mapping file. It will return the account name and ID for each of the accounts and pass those values into the input files that will go on to be main CDK applications inputs. -The Deployment Map file defines the pipelines along with their inputs, types and parameters. it also defines the targets of the pipeline within a list type structure. Each entry in the *'targets'* key list represents a stage within the pipeline that will be created. The Deployment map files also allow for some unique steps and actions to occur in your pipeline. You can add an approval step to your pipeline by putting a step in your targets definition titled, *'approval'* this will add a manual approval stage at this point in your pipeline. 
+The deployment map file defines the pipelines along with their inputs, +providers to use and their configuration. It also defines the targets of the +pipeline within a list type structure. -A basic example of a *deployment_map.yml* would look like the following: +Each entry in the `'targets'` key list represents a stage within the pipeline +that will be created. The deployment map files also allow for some unique steps +and actions to occur in your pipeline. For example, you can add an approval +step to your pipeline by putting a step in your targets definition titled, +`'approval'`. This will add a manual approval stage at this point in your +pipeline. + +A basic example of a `deployment_map.yml` would look like the following: ```yaml pipelines: @@ -60,15 +69,15 @@ pipelines: name: fancy-name #Optional way to pass a name for this stage in the pipeline ``` -In the above example we are creating two pipelines with AWS CodePipeline. The first one will deploy from a repository named **iam** that lives in the account **123456789101**. This CodeCommit Repository will automatically be created by default in the 123456789101 AWS Account if it does not exist. The automatic repository creation occurs if you enable *'auto-create-repositories'* (which is enabled by default). The pipeline *iam* pipeline will use AWS CodeCommit as its source and deploy in 3 steps. The first stage of the deployment will occur against all AWS Accounts that are in the `/security` Organization unit and be targeted to the `eu-west-1` region. After that, there is a manual approval phase which is denoted by the keyword `approval`. The next step will be targeted to the accounts within the `/banking/testing` OU *(in your default deployment account region)* region. By providing a simple path without a region definition it will default to the region chosen as the deployment account region in your [adfconfig](./admin-guide/adfconfig.yml). Any failure during the pipeline will cause it to halt. +In the above example we are creating two pipelines with AWS CodePipeline. The first one will deploy from a repository named **iam** that lives in the account **123456789101**. This CodeCommit Repository will automatically be created by default in the 123456789101 AWS Account if it does not exist. The automatic repository creation occurs if you enable `'auto-create-repositories'` (which is enabled by default). The `iam` pipeline will use AWS CodeCommit as its source and deploy in 3 steps. The first stage of the deployment will occur against all AWS Accounts that are in the `/security` Organization unit and be targeted to the `eu-west-1` region. After that, there is a manual approval phase which is denoted by the keyword `approval`. The next step will be targeted to the accounts within the `/banking/testing` OU *(in your default deployment account region)* region. By providing a simple path without a region definition it will default to the region chosen as the deployment account region in your [adfconfig](./admin-guide/adfconfig.yml). Any failure during the pipeline will cause it to halt. -The second pipeline (*vpc*) example deploys to an OU path `/banking/testing`. You can choose between an absolute path in your AWS Organization, AWS Account ID or an array of OUs or IDs. This pipeline also uses Github as a source rather than AWS CodeCommit. 
When generating the pipeline, ADF expects [GitHub Token](https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line) to be placed in AWS Secrets Manager in a path prefixed with **/adf/**. +The second pipeline (*vpc*) example deploys to an OU path `/banking/testing`. You can choose between an absolute path in your AWS Organization, AWS Account ID or an array of OUs or IDs. This pipeline also uses Github as a source rather than AWS CodeCommit. When generating the pipeline, ADF expects [GitHub Token](https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line) to be placed in AWS Secrets Manager in a path prefixed with `/adf/`. By default, the above pipelines will be created to deploy CloudFormation using a change in two actions *(Create then Execute)*. #### Targeting via Tags -Tags on AWS Accounts can also be used to define stages within a pipeline. For example, we might want to create a pipeline that targets all AWS Accounts with the tag **cost-center** and value of **foo-team**. *path/target* and *tags* should not be used in combination. +Tags on AWS Accounts can also be used to define stages within a pipeline. For example, we might want to create a pipeline that targets all AWS Accounts with the tag `cost-center` and value of `foo-team`. You cannot use a combination of `path/target` and `tags`. We do that with the following syntax: @@ -100,9 +109,11 @@ example `012345671234`, it will treat it as a octal number instead. Since this cannot be detected without making risky assumptions, the deployment will error to be on the safe side instead. -### Types +### Providers -The ADF comes with an extensive set of abstractions over CodePipeline providers that can be used to define pipelines. For example, see the below pipeline definition: +The ADF comes with an extensive set of abstractions over CodePipeline providers +that can be used to define pipelines. For example, see the following pipeline +definition: ```yaml pipelines: @@ -124,11 +135,30 @@ pipelines: deployment_group_name: testing-sample # https://docs.aws.amazon.com/codedeploy/latest/userguide/deployment-groups.html ``` -The pipeline *sample-ec2-java-app-codedeploy* has a *default_providers* top level key that defines the high level structure of the pipeline. It explicitly defines the source *(requirement for all pipelines)* and also defines what type of build will occur along with any associated parameters. In this example, we're explicitly saying we want to use AWS CodeBuild *(which is also the default)* and also to use a specific Docker Image for build stage. The deploy type is also defined at the top level of this pipeline, in this case *codedeploy*. This means that any of the targets of the pipeline will use AWS CodeDeploy as their default [Deployment Provider](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers). +The pipeline `sample-ec2-java-app-codedeploy` has a `default_providers` key +that defines the high-level structure of the pipeline. +It explicitly defines the source *(requirement for all pipelines)* and also +defines what type of build will occur along with any associated parameters. + +In this example, we're explicitly saying we want to use AWS CodeBuild +*(which is also the default)* and also to use a specific Docker Image for build +stage. The default deployment provider for this pipeline is configured to be +`codedeploy` in this example. 
This means that any of the targets of the +pipeline will use AWS CodeDeploy as their default +[Deployment Provider](https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#actions-valid-providers). + +In the targets section itself we have the opportunity to override the provider +itself or pass in any additional properties to that provider. In this example +we are passing in `application_name` and *deployment_group_name* as properties +to CodeDeploy for this specific stage. The `properties` can either be defined +by changing the `default_providers` configuration or get updated at the stage +level. Stage level config overrides default provider config. -In the targets section itself we have the opportunity to override the provider itself or pass in any additional properties to that provider. In this example we are passing in *application_name* and *deployment_group_name* as properties to CodeDeploy for this specific stage. properties can either be defined at the top level or used in the stage level to override top level values. By default, the build type is AWS CodeBuild and the deploy type is AWS CloudFormation. +By default, the build provider is AWS CodeBuild and the deployment provider is +AWS CloudFormation. -For detailed information on types, see the [types guide](./types-guide.md). +For detailed information on providers and their supported properties, see the +[providers guide](./providers-guide.md). ### Targets Syntax @@ -149,7 +179,7 @@ targets: - target: 9999999999 # Target and Path keys can be used interchangeably regions: eu-west-1 name: my-special-account # Defaults to adf-cloudformation-deployment-role - provider: some_provider # If you intend to override the provider for this stage (see types guide for available providers) + provider: some_provider # If you intend to override the provider for this stage (see providers guide for available providers) properties: my_prop: my_value # If you intend to pass properties to this specific stage - path: /my_ou/production # This can also be an array of OUs or AWS Account IDs @@ -165,13 +195,13 @@ Pipelines also have parameters that don't relate to a specific stage but rather The following are the available pipeline parameters: -- **notification_endpoint** +- *notification_endpoint* *(String)* defaults to none. > Can either be a valid email address or a string that represents the name of a Slack Channel. In order to integrate ADF with Slack see [Integrating with Slack](./admin-guide.md) in the admin guide. By Default, Notifications will be sent when pipelines Start, Complete or Fail. -- **schedule** +- *schedule* *(String)* defaults to none. > If the Pipeline should execute on a specific Schedule. Schedules are defined by using a Rate or an Expression. See [here](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#RateExpressions) for more information on how to define Rate or an Expression. -- **restart_execution_on_update** *(Boolean)* +- *restart_execution_on_update* *(Boolean)* default: `False`. > If the Pipeline should start a new execution if its structure is updated. Pipelines can often update their structure if targets of the pipeline are Organizational Unit paths. This setting allows pipelines to automatically run once an AWS Account has been moved in or out of a targeted OU. 
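+
+For example, these parameters sit in the pipeline's `params` block, next to
+`default_providers` and `targets`. An illustrative sketch (the layout of the
+`params` entries and all values below are assumptions/placeholders):
+
+```yaml
+pipelines:
+  - name: sample-vpc
+    default_providers:
+      source:
+        provider: codecommit
+        properties:
+          account_id: 111111111111
+    params:
+      - notification_endpoint: janes_team
+      - schedule: rate(7 days)
+      - restart_execution_on_update: True
+    targets:
+      - /banking/testing
+```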
### Completion Triggers @@ -248,7 +278,7 @@ In the example we have three steps to our install phase in our build, the remain Other packages such as [cfn-lint](https://github.com/awslabs/cfn-python-lint) can be installed in order to validate that our CloudFormation templates are up to standard and do not contain any obvious errors. If you wish to add in any extra packages you can add them to the *requirements.txt* in the `bootstrap_repository` which is brought down into AWS CodeBuild and installed. Otherwise you can add them into any pipelines specific buildspec.yml. -If you wish to hide away the steps that can occur in AWS CodeBuild, you can move the *buildspec.yml* content itself into the pipeline by using the *inline_spec* property in your map files. By doing this, you can remove the option to have a buildspec.yml in the source repository at all. This is a potential way to enforce certain build steps for certain pipeline types. +If you wish to hide away the steps that can occur in AWS CodeBuild, you can move the *buildspec.yml* content itself into the pipeline by using the *spec_inline* property in your map files. By doing this, you can remove the option to have a buildspec.yml in the source repository at all. This is a potential way to enforce certain build steps for certain pipeline types. #### Custom Build Images You can use [custom build](https://aws.amazon.com/blogs/devops/extending-aws-codebuild-with-custom-build-environments/) environments in AWS CodeBuild. This can be defined in the your deployment map files like so: diff --git a/requirements.txt b/requirements.txt index 9ca946cdb..6fae53f6f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,9 @@ tox==2.2.1 pylint==2.2.2 pytest==3.0.7 +isort==4.3.21 mock==2.0.0 boto3~=1.10, >=1.10.47 pyyaml>=5.1 astroid==2.1.0 -schema==0.7.1 \ No newline at end of file +schema==0.7.1 diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-accounts/readme.md b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-accounts/readme.md index b14c2e006..7f9e5d298 100644 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-accounts/readme.md +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-accounts/readme.md @@ -18,6 +18,7 @@ The OU name is the name of the direct parent of the account. If you want to move - Create and update account alias. - Account tagging. - Allow the account access to view its own billing. +- Set up support subscriptions during account provisioning ### Currently not supported @@ -25,6 +26,7 @@ The OU name is the name of the direct parent of the account. If you want to move - Updating account email addresses - Removing accounts - Handling root account credentials and MFA +- Changing the support subscription of an account. ### Configuration Parameters @@ -33,6 +35,9 @@ The OU name is the name of the direct parent of the account. If you want to move - `email`: Email associated by the account, must be valid otherwise it is not possible to access as root user when needed - `delete_default_vpc`: `True|False` if Default VPCs need to be delete from all AWS Regions. - `allow_billing`: `True|False` if the account see its own costs within the organization. +- `support_level`: `basic|enterprise` ADF will raise a ticket to add the account to an existing AWS support subscription when an account is created. Currently only supports basic or enterprise. + **NB: This is for activating enterprise support on account creation only. 
As a prerequisite your organization master account must already have enterprise support activated** + - `alias`: AWS account alias. Must be unique globally otherwise cannot be created. Check [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/console_account-alias.html) for further details. If the account alias is not created or already exists, in the Federation login page, no alias will be presented - `tags`: list of tags associate to the account. @@ -47,6 +52,7 @@ accounts: email: prod-team-1@company.com allow_billing: False delete_default_vpc: True + support_level: enterprise alias: prod-company-1 tags: - created_by: adf @@ -62,6 +68,7 @@ accounts: email: test-team-1@company.com allow_billing: True delete_default_vpc: False + support_level: basic alias: test-company-11 tags: - created_by: adf diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-bootstrap/deployment/global.yml b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-bootstrap/deployment/global.yml index 0cfcd89fc..d5baab6e2 100644 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-bootstrap/deployment/global.yml +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-bootstrap/deployment/global.yml @@ -664,10 +664,13 @@ Resources: build: commands: - cdk --version - - chmod 755 adf-build/cdk/execute_pipeline_stacks.py adf-build/cdk/generate_pipeline_inputs.py adf-build/cdk/generate_pipeline_stacks.py + - chmod 755 adf-build/cdk/execute_pipeline_stacks.py adf-build/cdk/generate_pipeline_inputs.py adf-build/cdk/generate_pipeline_stacks.py adf-build/cdk/clean_pipelines.py - python adf-build/cdk/generate_pipeline_inputs.py - cdk synth --app adf-build/cdk/generate_pipeline_stacks.py 1> /dev/null - python adf-build/cdk/execute_pipeline_stacks.py + post_build: + commands: + - python adf-build/cdk/clean_pipelines.py ServiceRole: !GetAtt PipelineProvisionerCodeBuildRole.Arn Tags: - Key: "Name" diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/main.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/main.py index d1d4850b9..7cf0466f6 100755 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/main.py +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/main.py @@ -9,7 +9,7 @@ import os from concurrent.futures import ThreadPoolExecutor import boto3 -from src import read_config_files, delete_default_vpc +from src import read_config_files, delete_default_vpc, Support from organizations import Organizations from logger import configure_logger from parameter_store import ParameterStore @@ -26,6 +26,7 @@ def main(): return LOGGER.info(f"Found {len(accounts)} account(s) in configuration file(s).") organizations = Organizations(boto3) + support = Support(boto3) all_accounts = organizations.get_accounts() parameter_store = ParameterStore(os.environ.get('AWS_REGION', 'us-east-1'), boto3) adf_role_name = parameter_store.fetch_parameter('cross_account_access_role') @@ -34,10 +35,10 @@ def main(): account_id = next(acc["Id"] for acc in all_accounts if acc["Name"] == account.full_name) except StopIteration: # If the account does not exist yet.. 
account_id = None - create_or_update_account(organizations, account, adf_role_name, account_id) + create_or_update_account(organizations, support, account, adf_role_name, account_id) -def create_or_update_account(org_session, account, adf_role_name, account_id=None): +def create_or_update_account(org_session, support_session, account, adf_role_name, account_id=None): """Creates or updates a single AWS account. :param org_session: Instance of Organization class :param account: Instance of Account class @@ -45,12 +46,15 @@ def create_or_update_account(org_session, account, adf_role_name, account_id=Non if not account_id: LOGGER.info(f'Creating new account {account.full_name}') account_id = org_session.create_account(account, adf_role_name) + # This only runs on account creation at the moment. + support_session.set_support_level_for_account(account, account_id) + sts = STS() role = sts.assume_cross_account_role( 'arn:aws:iam::{0}:role/{1}'.format( account_id, adf_role_name - ), 'delete_default_vpc' + ), 'adf_account_provisioning' ) LOGGER.info(f'Ensuring account {account_id} (alias {account.alias}) is in OU {account.ou_path}') diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/__init__.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/__init__.py index 95ca44064..8f1a9c905 100755 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/__init__.py +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/__init__.py @@ -7,3 +7,4 @@ from .configparser import read_config_files from .vpc import delete_default_vpc from .account import Account +from .support import Support, SupportLevel diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/account.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/account.py index 75eb0fdda..a94fedba1 100755 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/account.py +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/account.py @@ -17,6 +17,7 @@ def __init__( delete_default_vpc=False, allow_direct_move_between_ou=False, allow_billing=True, + support_level='basic', tags=None ): self.full_name = full_name @@ -26,6 +27,7 @@ def __init__( self.allow_direct_move_between_ou = allow_direct_move_between_ou self.allow_billing = allow_billing self.alias = alias + self.support_level = support_level if tags is None: self.tags = {} @@ -51,6 +53,9 @@ def load_from_config(cls, config): allow_billing=config.get( "allow_billing", True), + support_level=config.get( + "support_level", + 'basic'), tags=config.get( "tags", {})) diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/support.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/support.py new file mode 100644 index 000000000..7e98cb2d4 --- /dev/null +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/provisioner/src/support.py @@ -0,0 +1,135 @@ +"""Support module used throughout the ADF +""" +from enum import Enum +from botocore.config import Config +from botocore.exceptions import ClientError, BotoCoreError +from logger import configure_logger +from .account import Account + + +LOGGER = configure_logger(__name__) + + +class SupportLevel(Enum): + BASIC = "basic" + DEVELOPER = "developer" + BUSINESS = "business" + ENTERPRISE = "enterprise" 
+ + +class Support: # pylint: disable=R0904 + """Class used for accessing AWS Support API + """ + _config = Config(retries=dict(max_attempts=30)) + + def __init__(self, role): + self.client = role.client("support", region_name='us-east-1', config=Support._config) + + def get_support_level(self) -> SupportLevel: + """ + Gets the AWS Support Level of the current Account + based on the Role passed in during the init of the Support class. + + :returns: + SupportLevels Enum defining the level of AWS support. + + :raises: + ClientError + BotoCoreError + + """ + try: + severity_levels = self.client.get_severity_levels()['severityLevels'] + available_support_codes = [level['code'] for level in severity_levels] + + # See: https://aws.amazon.com/premiumsupport/plans/ for insights into the interpretation of + # the available support codes. + + if 'critical' in available_support_codes: # Business Critical System Down Severity + return SupportLevel.ENTERPRISE + if 'urgent' in available_support_codes: # Production System Down Severity + return SupportLevel.BUSINESS + if 'low' in available_support_codes: # System Impaired Severity + return SupportLevel.DEVELOPER + + return SupportLevel.BASIC + + except (ClientError, BotoCoreError) as e: + if e.response["Error"]["Code"] == "SubscriptionRequiredException": + LOGGER.info('Enterprise Support is not enabled') + return SupportLevel.BASIC + raise + + def set_support_level_for_account(self, account: Account, account_id: str, current_level: SupportLevel = SupportLevel.BASIC): + """ + Sets the support level for the account. If the current_value is the same as the value in the instance + of the account Class it will not create a new ticket. + + Currently only supports "basic|enterprise" tiers. + + :param account: Instance of Account class + :param account_id: AWS Account ID of the account that will have support configured for it. + :param current_level: SupportLevel value that represents the current support tier of the account (Default: Basic) + :return: Void + :raises: ValueError if account.support_level is not a valid/supported SupportLevel. + """ + desired_level = SupportLevel(account.support_level) + + if desired_level is current_level: + LOGGER.info(f'Account {account.full_name} ({account_id}) already has {desired_level.value} support enabled.') + + elif desired_level is SupportLevel.ENTERPRISE: + LOGGER.info(f'Enabling {desired_level.value} for Account {account.full_name} ({account_id})') + self._enable_support_for_account(account, account_id, desired_level) + + else: + LOGGER.error(f'Invalid support tier configured: {desired_level.value}. ' + f'Currently only "{SupportLevel.BASIC.value}" or "{SupportLevel.ENTERPRISE.value}" ' + 'are accepted.', exc_info=True) + raise ValueError(f'Invalid Support Tier Value: {desired_level.value}') + + def _enable_support_for_account(self, account: Account, account_id, desired_level: SupportLevel): + """ + Raises a support ticket in the organization root account, enabling support for the account specified + by account_id. + + :param account: Instance of Account class + :param account_id: AWS Account ID, of the account that will have support configured + :param desired_level: Desired Support Level + :return: Void + :raises: ClientError, BotoCoreError. 
+ """ + try: + cc_email = account.email + subject = f'[ADF] Enable {desired_level.value} Support for account: {account_id}' + body = ( + f'Hello, \n' + f'Can {desired_level.value} support be enabled on Account: {account_id} ({account.email}) \n' + 'Thank you!\n' + '(This ticket was raised automatically via ADF)' + + ) + LOGGER.info(f'Creating AWS Support ticket. {desired_level.value} Support for Account ' + f'{account.full_name}({account_id})') + + response = self.client.create_case( + subject=subject, + serviceCode='account-management', + severityCode='low', + categoryCode='billing', + communicationBody=body, + ccEmailAddresses=[ + cc_email, + ], + language='en', + ) + + LOGGER.info(f'AWS Support ticket: {response["caseId"]} ' + f'has been created. {desired_level.value} Support has ' + f'been requested on Account {account.full_name} ({account_id}). ' + f'{account.email} has been CCd') + + except (ClientError, BotoCoreError): + LOGGER.error(f'Failed to enable {desired_level.value} support for account: ' + f'{account.full_name} ({account.alias}): {account_id}', exc_info=True) + raise diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/clean_pipelines.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/clean_pipelines.py new file mode 100755 index 000000000..386f388a1 --- /dev/null +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/clean_pipelines.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: MIT-0 + +"""This file is pulled into CodeBuild containers + and used to remove stale SSM Parameter Store entries and delete + the CloudFormation stacks for pipelines that are no longer defined + in the deployment map(s) +""" + +import os +import boto3 + +from s3 import S3 +from logger import configure_logger +from deployment_map import DeploymentMap +from cloudformation import CloudFormation +from parameter_store import ParameterStore + + +LOGGER = configure_logger(__name__) +DEPLOYMENT_ACCOUNT_REGION = os.environ["AWS_REGION"] +DEPLOYMENT_ACCOUNT_ID = os.environ["ACCOUNT_ID"] +MASTER_ACCOUNT_ID = os.environ["MASTER_ACCOUNT_ID"] +ORGANIZATION_ID = os.environ["ORGANIZATION_ID"] +ADF_PIPELINE_PREFIX = os.environ["ADF_PIPELINE_PREFIX"] +SHARED_MODULES_BUCKET = os.environ["SHARED_MODULES_BUCKET"] +ADF_VERSION = os.environ["ADF_VERSION"] +ADF_LOG_LEVEL = os.environ["ADF_LOG_LEVEL"] + +def clean(parameter_store, deployment_map): + """ + Function used to remove stale entries in Parameter Store and + Deployment Pipelines that are no longer in the Deployment Map + """ + current_pipeline_parameters = parameter_store.fetch_parameters_by_path( + '/deployment/') + + parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3) + cloudformation = CloudFormation( + region=DEPLOYMENT_ACCOUNT_REGION, + deployment_account_region=DEPLOYMENT_ACCOUNT_REGION, + role=boto3 + ) + stacks_to_remove = [] + for parameter in current_pipeline_parameters: + name = parameter.get('Name').split('/')[-2] + if name not in [p.get('name') for p in deployment_map.map_contents['pipelines']]: + LOGGER.info(f'Deleting {parameter.get("Name")}') + parameter_store.delete_parameter(parameter.get('Name')) + stacks_to_remove.append(name) + + for stack in list(set(stacks_to_remove)): + cloudformation.delete_stack("{0}{1}".format( + ADF_PIPELINE_PREFIX, + stack + )) + + +def main(): + LOGGER.info('ADF Version %s', ADF_VERSION) + LOGGER.info("ADF 
Log Level is %s", ADF_LOG_LEVEL) + + parameter_store = ParameterStore( + DEPLOYMENT_ACCOUNT_REGION, + boto3 + ) + + s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET) + deployment_map = DeploymentMap( + parameter_store, + s3, + ADF_PIPELINE_PREFIX + ) + + LOGGER.info(f'Cleaning Stale Deployment Map entries') + clean(parameter_store, deployment_map) + + +if __name__ == '__main__': + main() diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/generate_pipeline_inputs.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/generate_pipeline_inputs.py index 1ac1269b2..18acbaa25 100755 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/generate_pipeline_inputs.py +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/cdk/generate_pipeline_inputs.py @@ -6,26 +6,23 @@ """This file is pulled into CodeBuild containers and used to build the pipeline cloudformation stack inputs """ - -import os import json +import os from thread import PropagatingThread -import boto3 +import boto3 +from cache import Cache +from deployment_map import DeploymentMap +from errors import ParameterNotFoundError +from logger import configure_logger +from organizations import Organizations +from parameter_store import ParameterStore from pipeline import Pipeline from repo import Repo from rule import Rule -from target import Target, TargetStructure from s3 import S3 -from logger import configure_logger -from errors import ParameterNotFoundError -from deployment_map import DeploymentMap -from cache import Cache -from cloudformation import CloudFormation -from organizations import Organizations from sts import STS -from parameter_store import ParameterStore - +from target import Target, TargetStructure LOGGER = configure_logger(__name__) DEPLOYMENT_ACCOUNT_REGION = os.environ["AWS_REGION"] @@ -38,34 +35,6 @@ ADF_LOG_LEVEL = os.environ["ADF_LOG_LEVEL"] -def clean(parameter_store, deployment_map): - """ - Function used to remove stale entries in Parameter Store and - Deployment Pipelines that are no longer in the Deployment Map - """ - current_pipeline_parameters = parameter_store.fetch_parameters_by_path( - '/deployment/') - - parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3) - cloudformation = CloudFormation( - region=DEPLOYMENT_ACCOUNT_REGION, - deployment_account_region=DEPLOYMENT_ACCOUNT_REGION, - role=boto3 - ) - stacks_to_remove = [] - for parameter in current_pipeline_parameters: - name = parameter.get('Name').split('/')[-2] - if name not in [p.get('name') for p in deployment_map.map_contents['pipelines']]: - parameter_store.delete_parameter(parameter.get('Name')) - stacks_to_remove.append(name) - - for stack in list(set(stacks_to_remove)): - cloudformation.delete_stack("{0}{1}".format( - ADF_PIPELINE_PREFIX, - stack - )) - - def ensure_event_bus_status(organization_id): _events = boto3.client('events') _events.put_permission( @@ -190,7 +159,6 @@ def main(): ), 'pipeline' ) organizations = Organizations(role) - clean(parameter_store, deployment_map) ensure_event_bus_status(ORGANIZATION_ID) try: auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories') diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/deployment_map.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/deployment_map.py index 6d2959650..2ebd17409 100644 --- 
a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/deployment_map.py
+++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/deployment_map.py
@@ -81,13 +81,9 @@ def _get_all(self):
         self.map_contents = {}
         self.map_contents['pipelines'] = []
         if os.path.isdir(self.map_dir_path):
-            for file in os.listdir(self.map_dir_path):
-                if file.endswith(".yml") and file != 'example-deployment_map.yml':
-                    self.determine_extend_map(
-                        self._read('{0}/{1}'.format(self.map_dir_path, file))
-                    )
+            self._process_dir(self.map_dir_path)
         self.determine_extend_map(
-            self._read() # Calling with default no args to get deployment_map.yml in root if it exists
+            self._read()  # Calling with default no args to get deployment_map.yml in root if it exists
         )
         if not self.map_contents['pipelines']:
             LOGGER.error(
@@ -95,3 +91,16 @@ def _get_all(self):
                 "You can create additional deployment maps if required in a folder named deployment_maps with any name (ending in .yml)"
             )
             raise InvalidDeploymentMapError("No Deployment Map files found..") from None
+
+    def _process_dir(self, path):
+        files = [os.path.join(path, f) for f in os.listdir(path)]
+        for filename in files:
+            LOGGER.info(f"Processing {filename} in path {path}")
+            if os.path.isdir(filename):
+                self._process_dir(filename)
+            elif filename.endswith(".yml") and os.path.basename(filename) != "example-deployment_map.yml":
+                self.determine_extend_map(
+                    self._read(filename)
+                )
+            else:
+                LOGGER.warning(f"{filename} is not a directory and does not end in .yml; skipping")
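The `_process_dir` addition above makes deployment map discovery recursive: any `.yml` file under the map directory, at any depth, now contributes pipelines, while the shipped example map is skipped. A rough standalone equivalent of that walk is sketched below; it is illustrative only and the default directory name is an assumption, not part of the patch.

```python
# Illustration only -- not part of the patch. Mirrors the recursive behaviour
# of DeploymentMap._process_dir using os.walk instead of explicit recursion.
import os


def find_deployment_maps(root="deployment_maps"):
    """Yield every .yml deployment map file under `root`, skipping the example."""
    for current_dir, _sub_dirs, file_names in os.walk(root):
        for name in file_names:
            if name.endswith(".yml") and name != "example-deployment_map.yml":
                yield os.path.join(current_dir, name)


# Hypothetical usage:
#   for map_file in find_deployment_maps():
#       print(map_file)
```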
""" - if pre_check: - try: - self.client.get_object(Bucket=self.bucket, Key=key) - except self.client.exceptions.NoSuchKey: - LOGGER.info("Uploading %s as %s to S3 Bucket %s in %s", file_path, key, self.bucket, self.region) - self.resource.Object(self.bucket, key).put(Body=open(file_path, 'rb')) - finally: - return self.build_pathing_style(style, key) #pylint: disable=W0150 - self.resource.Object(self.bucket, key).put(Body=open(file_path, 'rb')) + # Do we need to upload the file to the bucket? + # If we don't need to check first, do. Otherwise, check if it exists + # first and only upload if it does not exist. + if not pre_check or not self._does_object_exist(key): + self._perform_put_object(key, file_path) return self.build_pathing_style(style, key) + def _does_object_exist(self, key): + """ + Check whether the given S3 object key exists in this bucket or not. + + Args: + key (str): The S3 object key to check. + + Returns: + bool: True when the object exists, False when it does not. + """ + try: + self.client.get_object(Bucket=self.bucket, Key=key) + return True + except self.client.exceptions.NoSuchKey: + return False + + def _perform_put_object(self, key, file_path): + """ + Perform actual put operation without any checks. + This is called internally by the put_object method when the + requested file needs to be uploaded. + + Args: + key (str): They S3 key of the object to put the file contents to. + + file_path (str): The file to upload using binary write mode. + """ + try: + LOGGER.info( + "Uploading %s as %s to S3 Bucket %s in %s", + file_path, + key, + self.bucket, + self.region, + ) + with open(file_path, 'rb') as file_handler: + self.resource.Object(self.bucket, key).put(Body=file_handler) + LOGGER.debug("Upload of %s was successful.", key) + except BaseException: + LOGGER.error("Failed to upload %s", key, exc_info=True) + raise + def read_object(self, key): s3_object = self.resource.Object(self.bucket, key) return s3_object.get()['Body'].read().decode('utf-8') diff --git a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py index 965b86b34..61c069a23 100644 --- a/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py +++ b/src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/tests/test_s3.py @@ -3,11 +3,10 @@ # pylint: skip-file -import os -import boto3 +import botocore +from botocore.stub import Stubber from pytest import fixture, raises -from stubs import stub_s3 -from mock import Mock +from mock import Mock, patch, mock_open from s3 import S3 @@ -150,3 +149,263 @@ def test_build_pathing_style_unknown_style(us_east_1_cls): error_message = str(excinfo.value) assert error_message.find(correct_error_message) >= 0 + + +@patch('s3.S3.build_pathing_style') +@patch('s3.S3._perform_put_object') +@patch('s3.S3._does_object_exist') +def test_put_object_no_checks_always_upload(does_exist, perform_put, + build_path, eu_west_1_cls): + object_key = "some" + object_path = "s3://bucket/{key}".format(key=object_key) + file_path = "some_imaginary_file.json" + path_style = "s3-url" + does_exist.return_value = True + build_path.return_value = object_path + + return_value = eu_west_1_cls.put_object( + key=object_key, + file_path=file_path, + style=path_style, + pre_check=False, + ) + + assert return_value == object_path + + does_exist.assert_not_called() + 
perform_put.assert_called_once_with(object_key, file_path) + build_path.assert_called_once_with(path_style, object_key) + + +@patch('s3.S3.build_pathing_style') +@patch('s3.S3._perform_put_object') +@patch('s3.S3._does_object_exist') +def test_put_object_do_check_upload_when_missing( + does_exist, perform_put, build_path, eu_west_1_cls): + object_key = "some" + object_path = "s3://bucket/{key}".format(key=object_key) + file_path = "some_imaginary_file.json" + path_style = "s3-url" + does_exist.return_value = False + build_path.return_value = object_path + + return_value = eu_west_1_cls.put_object( + key=object_key, + file_path=file_path, + style=path_style, + pre_check=True, + ) + + assert return_value == object_path + + does_exist.assert_called_once_with(object_key) + perform_put.assert_called_once_with(object_key, file_path) + build_path.assert_called_once_with(path_style, object_key) + + +@patch('s3.S3.build_pathing_style') +@patch('s3.S3._perform_put_object') +@patch('s3.S3._does_object_exist') +def test_put_object_do_check_no_upload_object_present( + does_exist, perform_put, build_path, eu_west_1_cls): + object_key = "some" + object_path = "s3://bucket/{key}".format(key=object_key) + file_path = "some_imaginary_file.json" + path_style = "s3-url" + does_exist.return_value = True + build_path.return_value = object_path + + return_value = eu_west_1_cls.put_object( + key=object_key, + file_path=file_path, + style=path_style, + pre_check=True, + ) + + assert return_value == object_path + + does_exist.assert_called_once_with(object_key) + perform_put.assert_not_called() + build_path.assert_called_once_with(path_style, object_key) + + +@patch('s3.boto3.client') +def test_does_object_exist_yes(boto3_client): + s3_client = botocore.session.get_session().create_client('s3') + s3_client_stubber = Stubber(s3_client) + boto3_client.return_value = s3_client + object_key = "some" + + s3_cls = S3( + 'eu-west-1', + 'some_bucket' + ) + response = {} + expected_params = { + 'Bucket': s3_cls.bucket, + 'Key': object_key, + } + s3_client_stubber.add_response('get_object', response, expected_params) + s3_client_stubber.activate() + + assert s3_cls._does_object_exist(key=object_key) + + boto3_client.assert_called_once_with('s3', region_name='eu-west-1') + s3_client_stubber.assert_no_pending_responses() + + +@patch('s3.boto3.client') +def test_does_object_exist_no(boto3_client): + s3_client = botocore.session.get_session().create_client('s3') + s3_client_stubber = Stubber(s3_client) + boto3_client.return_value = s3_client + object_key = "some" + + s3_cls = S3( + 'eu-west-1', + 'some_bucket' + ) + s3_client_stubber.add_client_error( + 'get_object', + expected_params={'Bucket': s3_cls.bucket, 'Key': object_key}, + http_status_code=404, + service_error_code='NoSuchKey', + ) + s3_client_stubber.activate() + + assert not s3_cls._does_object_exist(key=object_key) + + boto3_client.assert_called_once_with('s3', region_name='eu-west-1') + s3_client_stubber.assert_no_pending_responses() + + +@patch('s3.boto3.resource') +@patch('s3.LOGGER') +def test_perform_put_object_success(logger, boto3_resource): + s3_resource = Mock() + s3_object = Mock() + s3_resource.Object.return_value = s3_object + boto3_resource.return_value = s3_resource + object_key = "some" + file_path = "some-file.json" + file_data = 'some file data' + + s3_cls = S3( + 'eu-west-1', + 'some_bucket' + ) + with patch("builtins.open", mock_open(read_data=file_data)) as mock_file: + s3_cls._perform_put_object( + key=object_key, + file_path=file_path, + ) + 
mock_file.assert_called_with(file_path, 'rb') + s3_resource.Object.assert_called_once_with(s3_cls.bucket, object_key) + s3_object.put.assert_called_once_with(Body=mock_file.return_value) + + logger.info.assert_called_once_with( + "Uploading %s as %s to S3 Bucket %s in %s", + file_path, + object_key, + s3_cls.bucket, + s3_cls.region, + ) + logger.debug.assert_called_once_with( + "Upload of %s was successful.", + object_key, + ) + logger.error.assert_not_called() + boto3_resource.assert_called_with('s3', region_name='eu-west-1') + + +@patch('s3.boto3.resource') +@patch('s3.LOGGER') +def test_perform_put_object_no_such_file(logger, boto3_resource): + s3_resource = Mock() + s3_object = Mock() + s3_resource.Object.return_value = s3_object + boto3_resource.return_value = s3_resource + object_key = "some" + file_path = "some-file.json" + + s3_cls = S3( + 'eu-west-1', + 'some_bucket' + ) + correct_error_message = "File not found exception" + with patch("builtins.open") as mock_file: + mock_file.side_effect = Exception(correct_error_message) + with raises(Exception) as excinfo: + s3_cls._perform_put_object( + key=object_key, + file_path=file_path, + ) + + error_message = str(excinfo.value) + assert error_message.find(correct_error_message) >= 0 + + mock_file.assert_called_with(file_path, 'rb') + s3_resource.Object.assert_not_called() + s3_object.put.assert_not_called() + + logger.info.assert_called_once_with( + "Uploading %s as %s to S3 Bucket %s in %s", + file_path, + object_key, + s3_cls.bucket, + s3_cls.region, + ) + logger.debug.assert_not_called() + logger.error.assert_called_once_with( + "Failed to upload %s", + object_key, + exc_info=True, + ) + boto3_resource.assert_called_with('s3', region_name='eu-west-1') + + +@patch('s3.boto3.resource') +@patch('s3.LOGGER') +def test_perform_put_object_failed(logger, boto3_resource): + s3_resource = Mock() + s3_object = Mock() + s3_resource.Object.return_value = s3_object + boto3_resource.return_value = s3_resource + object_key = "some" + file_path = "some-file.json" + file_data = 'some file data' + + s3_cls = S3( + 'eu-west-1', + 'some_bucket' + ) + correct_error_message = "Test exception" + s3_object.put.side_effect = Exception(correct_error_message) + with patch("builtins.open", mock_open(read_data=file_data)) as mock_file: + with raises(Exception) as excinfo: + s3_cls._perform_put_object( + key=object_key, + file_path=file_path, + ) + + error_message = str(excinfo.value) + assert error_message.find(correct_error_message) >= 0 + + mock_file.assert_called_with(file_path, 'rb') + s3_resource.Object.assert_called_once_with(s3_cls.bucket, object_key) + s3_object.put.assert_called_once_with(Body=mock_file.return_value) + + logger.info.assert_called_once_with( + "Uploading %s as %s to S3 Bucket %s in %s", + file_path, + object_key, + s3_cls.bucket, + s3_cls.region, + ) + logger.debug.assert_not_called() + logger.error.assert_called_once_with( + "Failed to upload %s", + object_key, + exc_info=True, + ) + boto3_resource.assert_called_with('s3', region_name='eu-west-1')
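As the tests above show, the existence check behind `pre_check` boils down to a `get_object` call that either succeeds or raises `NoSuchKey`. A condensed, self-contained version of that pattern using `botocore.stub.Stubber` is sketched below; it is illustrative only, runs without AWS credentials, and the bucket and key names are made up.

```python
# Illustration only -- not part of the patch. Demonstrates the NoSuchKey-based
# existence check that S3._does_object_exist relies on, driven by a stubbed
# client instead of a real S3 bucket.
import botocore.session
from botocore.stub import Stubber


def does_object_exist(client, bucket, key):
    """Return True when the object exists, False when get_object raises NoSuchKey."""
    try:
        client.get_object(Bucket=bucket, Key=key)
        return True
    except client.exceptions.NoSuchKey:
        return False


s3_client = botocore.session.get_session().create_client(
    's3', region_name='eu-west-1',
)
stubber = Stubber(s3_client)
stubber.add_client_error(
    'get_object',
    service_error_code='NoSuchKey',
    http_status_code=404,
    expected_params={'Bucket': 'some_bucket', 'Key': 'missing-key'},
)

with stubber:
    assert does_object_exist(s3_client, 'some_bucket', 'missing-key') is False
```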