From 1539ceed27cf7e2354535df9d5b34588f51c918b Mon Sep 17 00:00:00 2001
From: enriquh
Date: Mon, 9 Dec 2024 18:32:54 +0100
Subject: [PATCH] Adding support for nested stack generation in
 QSAssetsCFNSynthesizer and also improved code and readme file

---
 .gitignore                                    |    3 +
 README.md                                     |  104 +-
 deploy.py                                     |  122 +-
 ...AWSCloudFormationStackSetExecutionRole.yml |    7 +-
 .../CFNStacks/deploymentAccount_template.yaml |   72 +-
 .../deploymentAccount_template_delegated.yaml |  634 +++++++++
 .../CFNStacks/firstStageAccount_template.yaml |   47 +-
 .../createTemplateFromAnalysis.py             | 1202 +++++++++++++----
 .../helpers/analysis.py                       |    7 +-
 .../helpers/datasources.py                    |    2 +
 ...yaml => datasource_resource_CFN_skel.yaml} |    0
 .../resources/dest_CFN_skel.yaml              |    2 +-
 .../resources/dest_parent_CFN_skel.yaml       |   22 +
 .../resources/nested_stack_CFN_skel.yaml      |    8 +
 .../resources/source_CFN_skel.yaml            |   12 +
 .../resources/template_CFN_skel.yaml          |   45 -
 .../resources/template_resource_CFN_skel.yaml |   31 +
 .../resources/theme_resource_CFN_skel.yaml    |   23 +
 18 files changed, 1925 insertions(+), 418 deletions(-)
 create mode 100644 deployment/CFNStacks/deploymentAccount_template_delegated.yaml
 rename source/lambda/qs_assets_CFN_synthesizer/resources/{datasource_CFN_skel.yaml => datasource_resource_CFN_skel.yaml} (100%)
 create mode 100644 source/lambda/qs_assets_CFN_synthesizer/resources/dest_parent_CFN_skel.yaml
 create mode 100644 source/lambda/qs_assets_CFN_synthesizer/resources/nested_stack_CFN_skel.yaml
 create mode 100644 source/lambda/qs_assets_CFN_synthesizer/resources/source_CFN_skel.yaml
 delete mode 100644 source/lambda/qs_assets_CFN_synthesizer/resources/template_CFN_skel.yaml
 create mode 100644 source/lambda/qs_assets_CFN_synthesizer/resources/template_resource_CFN_skel.yaml
 create mode 100644 source/lambda/qs_assets_CFN_synthesizer/resources/theme_resource_CFN_skel.yaml

diff --git a/.gitignore b/.gitignore
index 5a1289d..e7d1e81 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 ### vsCode ###
 .vscode
+**/**.code-workspace

 ### macOS ###
 # General
@@ -17,6 +18,7 @@
 **/node_modules
 **/dist
+**/__pycache__
 **/build
 **/.DS_Store
 **/.angular
@@ -25,3 +27,4 @@
 cdk.out
 **/data/**.csv
 .env
+workspace/

diff --git a/README.md b/README.md
index d58c7f7..e3a9790 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,23 @@
-# Multi-account environment for Amazon QuickSight
+# Guidance for Multi-account environment for Amazon QuickSight
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+1. [Core concepts and terminology](#core-concepts-and-terminology)
+1. [Architecture](#architecture)
+    - [Guidance overview and AWS services to use](#guidance-overview-and-aws-services-to-use)
+    - [Architecture Diagram](#architecture-diagram)
+1. [Deploying the guidance](#deploying-the-guidance)
+    - [Pre-requisites and assumptions](#pre-requisites-and-assumptions)
+    - [Preparing deployment using the helper script](#preparing-deployment-using-the-helper-script)
+    - [Deploying _Deployment account_ assets](#deploying-deployment-account-assets)
+    - [Deploying _Development a.k.a. first stage account_ assets](#deploying-development-aka-first-stage-account-assets)
+1. [Using the guidance](#using-the-guidance)
+1. [Guidance limitations](#guidance-limitations)
+1. [Cleaning up](#cleaning-up)
+1. [FAQ/Troubleshooting](#faqtroubleshooting)
+1. [Contributing](#contributing)
+1. [License](#license)

 ## Introduction

@@ -63,9 +82,15 @@ Here you will find definition to specific terms that will be used throughout the

 Cloud-native, serverless, business intelligence (BI) service that will allow us to create assets in the dev account and automatically progress them to the pre-production and production accounts. In order to perform automation we will be using [QuickSight APIs](https://docs.aws.amazon.com/quicksight/latest/APIReference/Welcome.html) and its [support in CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/AWS_QuickSight.html) to manage assets.

+#### EventBridge
+
+EventBridge makes it easier to build event-driven applications at scale using events generated from your applications, integrated SaaS applications, and AWS services.
+
+In our solution we use EventBridge to trigger our automation (the synthesizer Lambda function and the CI/CD pipeline). First, the synthesizer Lambda function is triggered when a new version of a dashboard is created; it generates a set of files that are stored in S3, which in turn triggers the CI/CD pipeline (in AWS CodePipeline), itself integrated with EventBridge (in this case watching for new files created in S3).
+
 #### Lambda:

-Will allow us to synthesize CloudFormation templates from our assets in development to have them deployed in pre-production and then in production. The lambda function will synthesize CloudFormation templates that are parametrized (so they can be used in any environment stage) and will store the generated templates in S3 to be referred from the pipeline. There will be two templates generated:
+The QSAssetsCFNSynthesizer Lambda function is executed each time a new dashboard version is created in our Development account (via an EventBridge rule created as part of the initial deployment). It then checks the QSTrackedAssets DynamoDB table to see whether the dashboard is a tracked asset and, if it is, synthesizes CloudFormation templates from the QS assets present in development (a minimal sketch of this gating check is shown below). The function generates deployment assets (CFN templates) that are uploaded to S3, triggering an AWS CodePipeline pipeline that deploys them in pre-production and then in production. The generated CloudFormation templates are **nested and parametrized** (so they can be used in any environment stage), which also prevents reaching the CFN template size limit (1MB when using S3). The Lambda function generates two different templates:

 * Source assets template:
   * If `TEMPLATE` deployment method is selected: the source asset CloudFormation template will create a QuickSight Template from the source analysis in the Dev stage so the asset can be copied over to the next phase of the pipeline. Phase dependent variables such as the source account ID, region, QS user name, etc... will be added as [parameters so they can be set (or overridden)](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-action-reference.html) by the different CodePipeline phases. This template will be automatically deployed in the Dev account and pre-production account (to templatize the dashboard created in this stage).
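To make that gating step concrete, here is a minimal sketch (not the shipped handler) of how a synthesizer Lambda can check the tracking table before doing any work. The environment variable name and the event payload key are illustrative assumptions, not the exact ones used by the function:

```python
import os
import boto3

# Minimal sketch of the tracked-asset gate. Assumptions: the table name is
# passed via an environment variable and the QuickSight EventBridge event
# carries the dashboard id under event["detail"]; adjust both to your deployment.
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["TRACKED_ASSETS_TABLE"])  # e.g. QSTrackedAssets-QSCICDPipeline

def handler(event, context):
    dashboard_id = event["detail"]["dashboardId"]  # assumed payload key
    item = table.get_item(Key={"AssetId": dashboard_id, "AssetType": "DASHBOARD"}).get("Item")
    if not item:
        print(f"Dashboard {dashboard_id} is not tracked, nothing to synthesize")
        return
    # ...synthesize the nested, parametrized CFN templates and upload them to S3...
```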
@@ -86,6 +111,13 @@ Central piece of the guidance that, from a centralized deployment account (that

 [Parameter overrides](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference-CloudFormation.html) are used in each stage to define environment dependent variables in CloudFormation such as the account ID, data-source connection strings, etc ...

+#### DynamoDB
+
+We will be using two auxiliary DynamoDB tables:
+
+* QSTrackedAssets, to register the QS assets (only dashboards are supported right now) that our CodePipeline pipeline will track across the different stages
+* QSAssetParameters, where we store and configure the parameter values that the tracked resources in QSTrackedAssets need for each deployment stage (DEV/PRE/PRO). For example, if one of our dashboards uses an RDS database, the host/port combination will differ between stages, so we need to be able to configure these values per stage.
+
 #### CloudFormation:

 Service that allows us to define our infrastructure as code. This service will be configured as an action provider for the CodePipeline deploy actions, deploying assets in the pre-production and production accounts in an efficient and scalable manner (updating only resources as needed and detecting changes). Two CloudFormation templates per stage will be deployed.

 To deploy with CodePipeline in the first stage (typically PRE) [StackSets will be used](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference-StackSets.html), then for the subsequent stages [StackSet instances](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference-StackSets.html#action-reference-StackInstances) (for the same StackSet used earlier) will be used. This is to follow the best practices as mentioned in the [Create a pipeline with AWS CloudFormation StackSets deployment actions tutorial](https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials-stackset-deployment.html).

+All these templates are parametrized so the same template can be used for all the environments (DEV/PRE/PRO); the parameter values for each stage are retrieved at pipeline execution time based on the values configured in the DynamoDB tables, [refer to the DynamoDB section](#dynamodb) for more details.
+
 #### Event Bridge:

 As [QuickSight is integrated with EventBridge](https://aws.amazon.com/blogs/business-intelligence/automate-your-amazon-quicksight-assets-deployment-using-the-new-amazon-eventbridge-integration/), this guidance automatically configures an [Event Bridge rule](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-rules.html) that triggers the synthesizer Lambda function each time a new dashboard is published or updated. The Lambda synthesizer function uses an environment variable to specify the dashboard_id to monitor for changes to propagate through the pipeline. This prevents the pipeline from deploying changes from other dashboards that are updated in the development account.

@@ -124,7 +158,7 @@ Guidance assets will need to be deployed in two of the accounts, the first accou

 * Data-sources in the Dev account *should be using secrets* for RDS, RDBMS and Redshift sources. Secrets corresponding to each stage should exist in all the target accounts (they will be passed as CFN parameter).
For more information refer to [create an AWS Secrets Manager secret](https://docs.aws.amazon.com/secretsmanager/latest/userguide/create_secret.html)
 * (only when using `TEMPLATE` as deployment method) If data-sources in the dev account are using a [QuickSight VPC connection](https://docs.aws.amazon.com/quicksight/latest/user/working-with-aws-vpc.html), an equivalent VPC connection *should exist* in the other stage accounts; the ID of the VPC connection will be passed as a CFN parameter in the deployment action
 * All the accounts belong to the same [organization with all features enabled](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html).
-* Deployment account is the [organization management account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html). At the moment the use of [delegated administrator accounts](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-orgs-delegated-admin.html) is not supported when using CloudFormation StackSet operations in CodePipeline.
+* Deployment account is the [organization management account](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html). While using [delegated administrator accounts](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-orgs-delegated-admin.html) is now supported when using CloudFormation StackSet operations in CodePipeline, it doesn't fully support all the features of this solution, such as deploying to individual accounts (instead of OUs) or using nested stacks.

This guidance requires (at least) to deploy two CloudFormation stacks:

@@ -154,7 +188,7 @@ python deploy.py -h

Below you can find an execution example for the script:

```
-python deploy.py --bucket <bucket_name> --template_prefix templates --code_prefix code --bucket_region us-east-1 --deployment_account_id <deployment_account_id> --development_account_id <development_account_id> --prepro_account_id <prepro_account_id> --production_account_id <production_account_id> --pipeline_name QSCICDPipeline --quicksight_user <quicksight_user> --dashboard_id <dashboard_id>
+python deploy.py --bucket <bucket_name> --bucket_account_id <bucket_account_id> --template_prefix templates --code_prefix code --bucket_region us-east-1 --deployment_account_id <deployment_account_id> --development_account_id <development_account_id> --prepro_account_id <prepro_account_id> --production_account_id <production_account_id> --pipeline_name QSCICDPipeline
```

### Deploying _Deployment account_ assets

@@ -185,11 +219,7 @@ Remember that you can use the deployment helper script to customize the template

|PipelineS3BucketName|S3 Bucket to use for pipeline assets|String| qs-pipeline-bucket |
|S3Region|Region where the S3 bucket will be hosted|String| us-east-1 |
|QuickSightRegion|Region where QuickSight assets are hosted|String| us-east-1 |
-|SrcQSAdminRegion|Admin region for your QS source account where your users are hosted|String| us-east-1 |
-|DestQSAdminRegion|Admin region for your QS destination account where your users are hosted|String| us-east-1 |
|AccountAdmin|IAM ARN that will be responsible for administering the Account (it will be able to manage the created KMS key for encryption). Eg your role/user arn |String| User defined|
-|QSUser|QS Username in Account where the assets will be created|String| User defined|
-|Stage1Name|Name of the first stage in the pipeline, e.g. DEV|String| DEV |
|Stage2Name|Name of the second stage in the pipeline, e.g. PRE|String| PRE |
|Stage3Name|Name of the third stage in the pipeline, e.g. PRO|String| PRO |
|AssumeRoleExtId|IAM external ID to be used when assuming the IAM role in the development account. [Refer to this link](https://a.co/47mgPwV) for more details (see the sketch after this table)|String| qsdeppipeline |
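The external ID in the last row protects the cross-account role assumption. For reference, a minimal boto3 sketch of assuming the development-account role with that external ID; the role ARN and session name below are placeholders, not names created by this guidance:

```python
import boto3

# Minimal sketch: assume the development-account role using the external ID
# configured in AssumeRoleExtId. Role ARN and session name are placeholders.
sts = boto3.client("sts")
creds = sts.assume_role(
    RoleArn="arn:aws:iam::111122223333:role/QSPipelineDevRole",  # hypothetical role
    RoleSessionName="qs-cicd-synthesizer",
    ExternalId="qsdeppipeline",  # must match the AssumeRoleExtId parameter
)["Credentials"]

# Use the temporary credentials against QuickSight in the development account
qs = boto3.client(
    "quicksight",
    aws_access_key_id=creds["AccessKeyId"],
    aws_secret_access_key=creds["SecretAccessKey"],
    aws_session_token=creds["SessionToken"],
)
```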
@@ -215,24 +245,22 @@ The first stage account in our CI/CD pipeline will need to have the following as

For convenience default values were provided for most of the parameters. Remember that you can use the deployment helper script to customize the template according to your environment and then easily deploy the stack. Refer to the [Preparing deployment using helper script section](#preparing-deployment-using-the-helper-script) for more details.
+
 |Parameter name|Description|Type|Default Value|
 | ---- | ---- | ---- |---- |
+|AssumeRoleExtId|Ext ID to be used when assuming the IAM role in the development account|String| qsdeppipeline |
|DeploymentAccountId|Account ID used for the deployment pipelines|String| User defined|
|DeploymentS3Bucket|S3 Bucket to use for pipeline assets|String| qs-pipeline-bucket |
-|AssumeRoleExtId|Ext ID to be used in when assuming the IAM role in the development account|String| qsdeppipeline |
|QuickSightRegion|Region where QuickSight assets are hosted|String| us-east-1|
|DeploymentS3Region|Region where the deployment (CI/CD) bucket resides|String| us-east-1|
-|SourceQSUser|Source stage username to use to retrieve QS assets|String| User defined|
-|DestQSUser|Dest stage username to use to share the created QS assets with|String| User defined|
-|SourceCodeS3Bucket|S3 Bucket containing the code|String| User defined|
-|SourceCodeKey| Key within S3 Bucket that contains the zipped code. For your convenience you have the source code zipped in the guidance under source/lambda/qs_assets_CFN_synthesizer folder| String| User defined|
|LayerCodeKey| Key within S3 Bucket that contains the zipped code for the lambda layer with external libraries. For your convenience you have the source code zipped in the guidance under source/lambda/layer folder| String| User defined|
-|StageNames| List of comma-separated names of the stages that your pipeline will be having (e.g. DEV, PRE, PRO)| String| DEV, PRE, PRO|
-|DashboardId| Dashboard ID in development you want to track changes for | String| User defined|
-|ReplicationMethod| Method to use to replicate the dashboard (could be either TEMPLATE or ASSETS_AS_BUNDLE)| String - AllowedValues are TEMPLATE/ASSETS_AS_BUNDLE| ASSETS_AS_BUNDLE|
-|RemapDS | Whether or not to remap the data sources connection properties in the dashboard datasets (when using templates) or supported properties when using Assets As Bundle (more info here https://a.co/jeHZkOr)| String (YES/NO)| YES|
|PipelineName | Name of the Code Pipeline whose source assets this lambda will be contributing to | String| QSCICDPipeline|
-
+|RemapDS | Whether or not to remap the data sources connection properties in the dashboard datasets (when using templates) or supported properties when using Assets As Bundle (more info here https://a.co/jeHZkOr)| String (YES/NO)| YES|
+|GenerateNestedStacks | Whether or not to generate CFN nested stacks to be used by CodePipeline. CAUTION: this setting helps circumvent the potential issue of reaching the max template size (1MB) but can also break the resulting template; disable it if you experience any issues with CFN during pipeline deployments| String (YES/NO)| YES|
+|ReplicationMethod| Method to use to replicate the dashboard (could be either TEMPLATE or ASSETS_AS_BUNDLE; see the sketch after this table)| String - AllowedValues are TEMPLATE/ASSETS_AS_BUNDLE| ASSETS_AS_BUNDLE|
+|SourceCodeKey| Key within S3 Bucket that contains the zipped code. For your convenience you have the source code zipped in the guidance under source/lambda/qs_assets_CFN_synthesizer folder| String| User defined|
+|SourceCodeS3Bucket|S3 Bucket containing the code|String| User defined|
+|StageNames| List of comma-separated names of the stages that your pipeline will have (e.g. DEV, PRE, PRO)| String| DEV, PRE, PRO|
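Several of these parameters (ReplicationMethod, RemapDS, GenerateNestedStacks) surface as environment variables on the synthesizer Lambda, which the next section asks you to change. A minimal sketch of flipping one of them with boto3, assuming a hypothetical function name (check your stack's resources tab for the real one):

```python
import boto3

# Minimal sketch: change a synthesizer environment variable (e.g. MODE or
# REPLICATION_METHOD) without redeploying the stack. Note that
# update_function_configuration replaces ALL variables, so merge first.
lam = boto3.client("lambda")
fn = "QSAssetsCFNSynthesizer"  # hypothetical function name

current = lam.get_function_configuration(FunctionName=fn).get("Environment", {}).get("Variables", {})
current["MODE"] = "DEPLOY"  # or "INITIALIZE"
lam.update_function_configuration(FunctionName=fn, Environment={"Variables": current})
```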

## Using the guidance

Once you have the guidance deployed you will be ready to start using your newly created pipeline.

In order to do so you just need to follow this procedure:

-1. [**In your Development account**] Go to your QuickSight console and in the [analyses section](https://us-east-1.quicksight.aws.amazon.com/sn/start/analyses) search for the available analysis, you should see an analysis named `Web and Social Media Analytics analysis` with an orange label that indicates that is a `SAMPLE`analysis
-1. [**In your Development account**] Open then analysis and publish it as a dashboard, then note the ID of the dashboard (that ou can see at the end of the URL of the page as you will need it in later steps)
-1. [**In your Development account**] Open the synthesizer Lambda function created by the development account Cloudformation Stack (you can easily get a link to it from your Cloud Formation stack using the resources tab). Navigate to the configuration tab in lambda and locate the [Environment Variables section](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-config). Now set the dasboard ID you got from the previous step as value for `DASHBOARD_ID`
+1. [**In your Development account**] Go to your QuickSight console and in the [analysis section](https://us-east-1.quicksight.aws.amazon.com/sn/start/analyses) search for the available analysis, you should see an analysis named `Web and Social Media Analytics analysis` with an orange label that indicates that it is a `SAMPLE` analysis.
+1. [**In your Development account**] Open the analysis and publish it as a dashboard, name it `Test Pipeline Dashboard`, then note the ID of the dashboard (that you can see at the end of the URL https://<region>.quicksight.aws.amazon.com/sn/dashboards/<dashboard_id>) as you will need it in later steps.
 1. Ensure that the accounts from subsequent stages are subscribed to QuickSight Enterprise edition.
+1. Ensure the AWSCloudFormationStackSetExecutionRole exists in all the stage AWS accounts. You can [check this by opening this page in IAM](https://us-east-1.console.aws.amazon.com/iam/home?region=us-east-1#/roles/details/AWSCloudFormationStackSetExecutionRole?section=permissions) **in each of the stage accounts (DEV/PRE/PRO)**.
1. [**In your Development account**] Choose the desired deployment method `TEMPLATE` or `ASSETS_AS_BUNDLE`. This is controlled via the *REPLICATION_METHOD* Lambda environment variable (it is set to `ASSETS_AS_BUNDLE` by default)
1. [**In your Deployment account**] Navigate to the DynamoDB console and open the [tables section](https://us-east-1.console.aws.amazon.com/dynamodbv2/home?region=us-east-1#tables). Here you should see two tables named QSAssetParameters-<PipelineName> and QSTrackedAssets-<PipelineName> where <PipelineName> corresponds to the pipeline name you set in the [deployment template parameters](#deploying-deployment-account-assets).
1. [**In your Deployment account**] Click on the QSTrackedAssets-<PipelineName> table and, under the `Actions` menu, click on `Create Item`. Create an item with the following fields: AssetId, which should be the dashboard ID you noted down in step 2, and AssetType, set to `DASHBOARD` (a scripted alternative is sketched after the example below).
1. [**In your Development account**] Manually execute the lambda function present in the development account making sure the *MODE* variable is set to `INITIALIZE` (this should be already set by default).
1. [**In your Development account**] The lambda function will scan the resources that need to be synthesized in the source account based on the items found in the QSTrackedAssets-<PipelineName> table. The lambda function will initialize the QSAssetParameters-<PipelineName> DynamoDB table with four items (two per stage, PRE and PRO, in our default configuration). Each stage will have two items, one with AssetType set to `source` (that will be empty if you use `ASSETS_AS_BUNDLE` as ReplicationMethod) and another one with AssetType set to `dest`, which corresponds to the assets that CodePipeline will deploy via CloudFormation templates in your stage accounts (DEV/PRE/PRO). Each record will contain two additional attributes, `ParameterDefinition` and `ParameterDefinitionHelp`. The `ParameterDefinition` is a JSON array containing ParameterKey/ParameterValue pairs for each of the parameters needed by the QS assets configured in QSTrackedAssets-<PipelineName>. A detailed explanation of these parameters can be found in the `ParameterDefinitionHelp` attribute. For our example with the `Web and Social Media Analytics` dashboard the `ParameterDefinition` attribute in the QSAssetParameters-<PipelineName> DynamoDB table should look similar to the following:

```json
[
    {
    }
]
```
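If you prefer to script the `Create Item` step instead of using the DynamoDB console, a minimal boto3 sketch could look like this; the pipeline name suffix and dashboard ID are placeholders for your own values:

```python
import boto3

# Minimal sketch: register a dashboard in the tracking table instead of using
# the console. Table name suffix and dashboard ID below are placeholders.
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table("QSTrackedAssets-QSCICDPipeline")  # QSTrackedAssets-<PipelineName>
table.put_item(Item={
    "AssetId": "11111111-2222-3333-4444-555555555555",  # your dashboard ID from step 2
    "AssetType": "DASHBOARD",
})
```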
6. [**In your Deployment account**] Now edit the records corresponding to AssetType `dest` for each of the StageNames (PRE and PRO), setting the `ParameterDefinition` as needed. For the Web and Social Media Analytics dashboard example it would look like the following (notice that the parameter keys could be different):

```json
[
    {
    }
]
```

-7. [*In your Development account*] Ini case you are not using the Web and Social Media Analytics sample analysis and your analysis has different dataset you will need to execute the [describe-data-source](https://docs.aws.amazon.com/cli/latest/reference/quicksight/describe-data-source.html) operation for each of the data sources used in your dashboard to understand the resources they use (e.g. S3 buckets, RDS databases, Redshift clusters ...) and then determine which values should you use in each of the subsequent stage environments (PRE and PRO).
-7. [*In your Development account*] Once you have edited the parameter files for each stage upload the edited files to the same location in S3 and then execute the lambda function changing the environment variable *MODE* to `DEPLOY` or just create a new version of the dashboard configured in the pipeline. This will trigger the Lambda that will create the [CloudFormation artifacts](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-cfn-artifacts.html) according to the selected *REPLICATION_METHOD* and will upload them to the deployment S3 bucket monitored by EventBridge that will trigger the execution of the pipeline
-7. [*In your Deployment account*] Check the pipeline execution and the deployment in your second stage (typically PRE), once the deployment is complete navigate to the quicksight console in your region to see the deployed analysis.
-7. [*In your Deployment account*] Once you have validated the analysis in the first stage (PRE) you may go back to the pipeline and decide whether or not you want to approve the change so it reaches the second stage (typically PRO)
+7. [**In your Development account**] If you are not using the Web and Social Media Analytics sample analysis and your analysis uses different datasets, you will need to execute the [describe-data-source](https://docs.aws.amazon.com/cli/latest/reference/quicksight/describe-data-source.html) operation for each of the data sources used in your dashboard (a boto3 equivalent is sketched after this list item) to understand the resources they use (e.g. S3 buckets, RDS databases, Redshift clusters ...) and then determine which values you should use in each of the subsequent stage environments (PRE and PRO). You can also refer to the `ParameterDefinitionHelp` attribute in the QSAssetParameters-<PipelineName> DynamoDB table to get more insight into each of the parameters and the origin QuickSight asset that uses it.
+7. [**In your Development account**] Once you have edited the `ParameterDefinition` attributes for each stage in the QSAssetParameters-<PipelineName> DynamoDB table, set your Lambda function's *MODE* environment variable to `DEPLOY`. This changes the Lambda behavior to create the [CloudFormation artifacts](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-cfn-artifacts.html) according to the selected *REPLICATION_METHOD* and upload them to the deployment S3 bucket monitored by EventBridge, which will trigger the execution of the pipeline.
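For the describe-data-source step, a minimal boto3 equivalent is sketched below; the account ID and data source ID are placeholders. The `DataSourceParameters` in the response list the connection properties (host, port, database, bucket, ...) that you will need to remap per stage:

```python
import boto3

# Minimal sketch: inspect a data source to learn which connection properties
# must be remapped per stage. Account ID and data source ID are placeholders.
qs = boto3.client("quicksight", region_name="us-east-1")
response = qs.describe_data_source(
    AwsAccountId="111122223333",
    DataSourceId="my-datasource-id",
)
print(response["DataSource"]["Type"])
print(response["DataSource"].get("DataSourceParameters", {}))
```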
7. [**In your Development account**] Access QuickSight and in the [analysis section](https://us-east-1.quicksight.aws.amazon.com/sn/start/analyses) search for the `Web and Social Media Analytics analysis` analysis, make a change to it (e.g. add a KPI visual) and then publish it as a dashboard, replacing the dashboard you created in step 2 (`Test Pipeline Dashboard`). As we have an EventBridge rule configured to run our synthesizer Lambda function each time a dashboard version is created, the previous step will trigger the complete pipeline.
7. [**In your Deployment account**] Check the pipeline execution and the deployment in your second stage (typically PRE); once the deployment is complete, navigate to the QuickSight console in your region to see the deployed analysis.
7. [**In your Deployment account**] Once you have validated the analysis in PRE you may go back to the pipeline and decide whether or not you want to approve the change so it reaches the next stage (typically PRO)
7. After changes have been approved you should see a new deployment start that will progress your changes to PRO

## Guidance limitations

-* At the moment the Pipeline supports the continuous deployment of **one single dashboard** if you want to deploy multiple dashboards you will need to create different pipelines and synthesizer lambda functions by creating multiple instances of the Deployment account and First account templates.
+* At the moment the Pipeline supports the continuous deployment of QuickSight DASHBOARDS only; ANALYSIS and Q_TOPICS assets are not supported.
* When using `TEMPLATE` as replication method, supported datasources are RDS, Redshift, S3 and Athena
* When using `ASSETS_AS_BUNDLE` as replication method, all the datasources are supported except the ones [listed here](https://docs.aws.amazon.com/quicksight/latest/developerguide/asset-bundle-ops.html). Also uploaded file datasources are not supported.

@@ -338,7 +368,7 @@ This is expected as by default, a pipeline starts automatically when it is creat

### Problem

-When executing my pipeline I get the following error: Datasource XXXX (ID YYY ) is an RDS datasource and it is not configured with a secret, cannot proceed
+When executing my pipeline I get the following error: Datasource XXXX (ID YYY) is an RDS datasource and it is not configured with a secret, cannot proceed

### Solution

When you use RDBMS datasources in QuickSight (e.g. RDS, Redshift) they require you

### Problem

-When executing changes in the pipeline not all the changes are deployed in order, if I summit a new change while the pipeline is still deploying the previous one (or pending approval to get to the last stage) a newer change can overtake a previous one.

-### Solution

-This is expected when Code Pipeline works in SUPERSEDED mode, where the pipelines is able to process multiple executions at the same time. Each execution is run through the pipeline separately. The pipeline processes each execution in order and might supersede an earlier execution with a later one. The following rules are used to process executions in a pipeline for SUPERSEDED mode, [more info here](https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts-how-it-works.html#concepts-how-it-works-executions). If you want your pipeline to lock stages when an execution is being processed so waiting executions do not overtake executions that have already started you might want to take a look to the [QUEUED mode here.](https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts-how-it-works.html#concepts-how-it-works-executions-queued). You can change the pipeline mode according to your needs, [more info here](https://docs.aws.amazon.com/codepipeline/latest/userguide/execution-modes.html).
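The FAQ entry above is removed because the deployment templates in this change now create the pipeline with `ExecutionMode: QUEUED`. If you need to flip an existing V2 pipeline from SUPERSEDED to QUEUED yourself, a minimal boto3 sketch (the pipeline name is a placeholder):

```python
import boto3

# Minimal sketch: switch an existing V2 pipeline to QUEUED so waiting
# executions cannot overtake ones already in flight. Name is a placeholder.
cp = boto3.client("codepipeline")
pipeline = cp.get_pipeline(name="QSCICDPipeline")["pipeline"]
pipeline["executionMode"] = "QUEUED"
cp.update_pipeline(pipeline=pipeline)
```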
### Problem

When executing the synthesizer lambda function it raises an error stating ```An error occurred (ResourceNotFoundException) when calling the DescribeDashboard operation```

### Solution

Ensure the dashboard specified in the `DASHBOARD_ID` Lambda environment variable exists in the development account (a.k.a. first stage account)

-## Security
+## Contributing

See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.

diff --git a/deploy.py b/deploy.py
index c03cf73..b21f9d9 100644
--- a/deploy.py
+++ b/deploy.py
@@ -5,6 +5,7 @@
 import argparse
 import shutil
 import os
+import sys

 def uploadFileToS3(bucket: str, filename: str, region: str, prefix=None, object_name=None):
@@ -56,6 +57,30 @@ def uploadFileToS3(bucket: str, filename: str, region: str, prefix=None, object_
 def print_yellow(text): print("\033[93m {}\033[00m" .format(text))
 def print_green(text): print("\033[92m {}\033[00m" .format(text))
+def print_red(text): print("\033[91m {}\033[00m" .format(text))
+
+def check_bucket_public_block_acls(bucket_name, region):
+    s3 = boto3.client('s3', region_name=region)
+    s3control = boto3.client('s3control', region_name=region)
+    try:
+        controlresponse = s3control.get_public_access_block(
+            AccountId=param_bucket_account_id
+        )
+        response = s3.get_public_access_block(Bucket=bucket_name)
+        if controlresponse['PublicAccessBlockConfiguration']['BlockPublicAcls'] is True:
+            print_red('ERROR: The AWS Account with id {account_id} has public access block configured for all buckets, so the script cannot proceed, change this configuration and try again'.format(account_id=param_bucket_account_id))
+            print_red('Change this setting in the S3 console https://{region}.console.aws.amazon.com/s3/settings'.format(region=region))
+            return False
+        if response['PublicAccessBlockConfiguration']['BlockPublicAcls'] is True:
+            print_red('ERROR: The bucket {bucket} has public access block configured, so the script cannot proceed, change this configuration and try again'.format(bucket=bucket_name))
+            print_red('Change this setting in the S3 console https://{region}.console.aws.amazon.com/s3/buckets/{bucket_name}?bucketType=general&tab=permissions'.format(region=region, bucket_name=bucket_name))
+            return False
+        else:
+            return True
+    except ClientError as e:
+        print("Error checking S3 Block Public Access configuration")
+        return False
+

 def validate_bucket(bucket_name):
     s3 = boto3.resource('s3')
@@ -75,6 +100,7 @@ def validate_bucket(bucket_name):

 parser=argparse.ArgumentParser()
+parser.add_argument("--bucket_account_id", required=True, help="Account ID owning the S3 bucket where the solution code and templates will be uploaded, you need to have valid IAM credentials to upload objects to it")
 parser.add_argument("--bucket", required=True, help="S3 bucket where the solution code and templates will be uploaded, you need to have valid IAM credentials to upload objects to it")
 parser.add_argument("--bucket_region", required=True, help="Region of S3 bucket where the solution code and templates will be uploaded")
templates will be uploaded") parser.add_argument("--template_prefix",required=True, help="prefix within your S3 bucket where templates will be uploaded, you need to have valid IAM credentials to upload objects to it") @@ -83,11 +109,9 @@ def validate_bucket(bucket_name): parser.add_argument("--development_account_id", required=True, help="Account ID that you will be using as development account. It can be any account within the same organization as the deployment account") parser.add_argument("--prepro_account_id", required=True, help="Account ID that you will be using as pre-production account. It can be any account within the same organization as the deployment account") parser.add_argument("--production_account_id", required=True, help="Account ID that you will be using as production account. It can be any account within the same organization as the deployment account") -parser.add_argument("--quicksight_user", help="Username for QS that will be the owner of the assets created by the pipeline", default='') parser.add_argument("--pipeline_name", help="Name of the pipeline that will be created, defaults to QSPipeline", default="QSPipeline") parser.add_argument("--admin_role", help="Name of the pipeline that will be created, defaults to QSPipeline", default="Admin") -parser.add_argument("--dashboard_id", help="Id of the existing dashboard in the development account that you want the pipeline to promote across environments", default="") - +parser.add_argument("--approval_email", help="Email to send by the pipeline for approval step", default="user@domain.com") args=parser.parse_args() LOCAL_LAYER_PATH='source/lambda/layer/lambdaLayerBotoYAML.zip' @@ -95,25 +119,27 @@ def validate_bucket(bucket_name): ZIP_CODE_FILE='qs_assets_CFN_synthesizer' FIRST_ACCOUNT_TEMPLATE_PATH='deployment/CFNStacks/firstStageAccount_template.yaml' DEPLOYMENT_TEMPLATE_PATH='deployment/CFNStacks/deploymentAccount_template.yaml' +CFN_STACK_SET_EXECUTION_ROLE_NAME='deployment/CFNStacks/AWSCloudFormationStackSetExecutionRole.yml' CFN_STACKS_ROLE_CREATION_TEMPLATE_PATH='deployment/CFNStacks/AWSCloudFormationStackSetExecutionRole.yml' WORKSPACE_DIR='workspace' first_account_template = yaml.safe_load(open(FIRST_ACCOUNT_TEMPLATE_PATH, 'r')) deployment_account_template = yaml.safe_load(open(DEPLOYMENT_TEMPLATE_PATH, 'r')) +cfn_stack_role_creation_template = yaml.safe_load(open(CFN_STACKS_ROLE_CREATION_TEMPLATE_PATH, 'r')) param_bucket = args.bucket +param_bucket_account_id = args.bucket_account_id code_prefix = args.code_prefix template_prefix = args.template_prefix deployment_account_id = args.deployment_account_id development_account_id = args.development_account_id preproduction_account_id = args.prepro_account_id production_account_id = args.production_account_id +approval_email = args.approval_email admin_role = args.admin_role depl_admin_role_arn = 'arn:aws:iam::{account_id}:role/{role}'.format(account_id=deployment_account_id, role=admin_role) -quicksight_user = args.quicksight_user -quicksight_dashboard_id = args.dashboard_id first_account_template['Parameters']['SourceCodeS3Bucket']['Default'] = param_bucket @@ -121,16 +147,17 @@ def validate_bucket(bucket_name): first_account_template['Parameters']['LayerCodeKey']['Default'] = '{code_prefix}/lambdaLayerBotoYAML.zip'.format(code_prefix=code_prefix) first_account_template['Parameters']['DeploymentAccountId']['Default'] = deployment_account_id first_account_template['Parameters']['PipelineName']['Default'] = args.pipeline_name 
-first_account_template['Parameters']['DestQSUser']['Default'] = quicksight_user
-first_account_template['Parameters']['SourceQSUser']['Default'] = quicksight_user
-first_account_template['Parameters']['DashboardId']['Default'] = quicksight_dashboard_id
+first_account_template['Parameters']['DeploymentS3Bucket']['Default'] = 'qs-pipeline-bucket-{account_id}'.format(account_id=deployment_account_id)

 deployment_account_template['Parameters']['DevelopmentAccountId']['Default'] = development_account_id
 deployment_account_template['Parameters']['PreProdAccountId']['Default'] = preproduction_account_id
 deployment_account_template['Parameters']['ProdAccountId']['Default'] = production_account_id
-deployment_account_template['Parameters']['QSUser']['Default'] = quicksight_user
 deployment_account_template['Parameters']['AccountAdminARN']['Default'] = depl_admin_role_arn
 deployment_account_template['Parameters']['PipelineName']['Default'] = args.pipeline_name
+deployment_account_template['Parameters']['ApprovalEmail']['Default'] = approval_email
+deployment_account_template['Parameters']['PipelineS3BucketName']['Default'] = 'qs-pipeline-bucket-{account_id}'.format(account_id=deployment_account_id)
+
+cfn_stack_role_creation_template['Parameters']['AdministratorAccountId']['Default'] = deployment_account_id

 try:
     shutil.rmtree(WORKSPACE_DIR)
@@ -144,10 +171,11 @@ def validate_bucket(bucket_name):

 first_account_template_file = 'firstStageAccount_template_customized.yaml'
 deployment_account_template_file = 'deploymentAccount_template_customized.yaml'
-cfn_stack_role_creation_template_file = 'AWSCloudFormationStackSetExecutionRole.yaml'
+cfn_stack_role_creation_template_file = 'AWSCloudFormationStackSetExecutionRole_customized.yaml'

 output_first_account_template_path = "{workspace_dir}/{file}".format(workspace_dir=WORKSPACE_DIR, file=first_account_template_file)
 output_deployment_account_template_path = "{workspace_dir}/{file}".format(workspace_dir=WORKSPACE_DIR, file=deployment_account_template_file)
+output_stack_role_creation_template_path = "{workspace_dir}/{file}".format(workspace_dir=WORKSPACE_DIR, file=cfn_stack_role_creation_template_file)

 # Write first account customized template
 with open(output_first_account_template_path, 'w+') as f:
@@ -157,51 +185,59 @@ def validate_bucket(bucket_name):
 with open(output_deployment_account_template_path, 'w+') as f:
     yaml.dump(deployment_account_template, f)

+# Write cfn stack role creation customized template
+with open(output_stack_role_creation_template_path, 'w+') as f:
+    yaml.dump(cfn_stack_role_creation_template, f)
+
 # zip code
 zip_code_output = "{workspace_dir}/{archive_file}".format(workspace_dir=WORKSPACE_DIR, archive_file=ZIP_CODE_FILE)
 shutil.make_archive(base_name=zip_code_output, root_dir=LOCAL_CODE_PATH, format='zip')

-# upload layer
-uploadFileToS3(bucket=param_bucket, filename=LOCAL_LAYER_PATH,region=args.bucket_region, prefix=code_prefix, object_name='lambdaLayerBotoYAML.zip')
+if not check_bucket_public_block_acls(bucket_name=param_bucket, region=args.bucket_region):
+    print_red('ERROR: Either AWS account id {aws_account_id} or bucket {bucket} has Block public access enabled and will not allow the code to be uploaded, change it to proceed'.format(aws_account_id=param_bucket_account_id, bucket=param_bucket))
+else:

-# upload code
-uploadFileToS3(bucket=param_bucket, filename='{file_name}.zip'.format(file_name=zip_code_output),region=args.bucket_region, prefix=code_prefix, object_name='qs_assets_CFN_synthesizer.zip')
+    # upload layer
+    uploadFileToS3(bucket=param_bucket, filename=LOCAL_LAYER_PATH, region=args.bucket_region, prefix=code_prefix, object_name='lambdaLayerBotoYAML.zip')

+    # upload code
+    uploadFileToS3(bucket=param_bucket, filename='{file_name}.zip'.format(file_name=zip_code_output), region=args.bucket_region, prefix=code_prefix, object_name='qs_assets_CFN_synthesizer.zip')

-#remove previous zipped files
-cfn_one_click_deployment_url =\
-    "https://console.aws.amazon.com/cloudformation/home?region={region}#/stacks/new?stackName=CICDDeployment{PipelineName}&templateURL=https://{bucket_name}.s3.amazonaws.com/{template_prefix}/{template_file}"\
-    .format(region=args.bucket_region, PipelineName=args.pipeline_name, bucket_name=args.bucket, template_prefix=template_prefix, template_file=deployment_account_template_file)
-cfn_one_click_first_stage_url =\
-    "https://console.aws.amazon.com/cloudformation/home?region={region}#/stacks/new?stackName=CICDFirstStage{PipelineName}&templateURL=https://{bucket_name}.s3.amazonaws.com/{template_prefix}/{template_file}"\
-    .format(region=args.bucket_region, PipelineName=args.pipeline_name, bucket_name=args.bucket, template_prefix=template_prefix, template_file=first_account_template_file)
-cfn_one_cfn_stack_role_creation_url =\
-    "https://console.aws.amazon.com/cloudformation/home?region={region}#/stacks/new?stackName=DelegatedAccessStacksets&templateURL=https://{bucket_name}.s3.amazonaws.com/{template_prefix}/{template_file}"\
-    .format(region=args.bucket_region, bucket_name=args.bucket, template_prefix=template_prefix, template_file=cfn_stack_role_creation_template_file)
+    #remove previous zipped files

-print("Assets successfully uploaded to {bucket} bucket".format(bucket=param_bucket))
+    cfn_one_click_deployment_url =\
+        "https://console.aws.amazon.com/cloudformation/home?region={region}#/stacks/new?stackName=CICDDeployment{PipelineName}&templateURL=https://{bucket_name}.s3.amazonaws.com/{template_prefix}/{template_file}"\
+        .format(region=args.bucket_region, PipelineName=args.pipeline_name, bucket_name=args.bucket, template_prefix=template_prefix, template_file=deployment_account_template_file)
+    cfn_one_click_first_stage_url =\
+        "https://console.aws.amazon.com/cloudformation/home?region={region}#/stacks/new?stackName=CICDFirstStage{PipelineName}&templateURL=https://{bucket_name}.s3.amazonaws.com/{template_prefix}/{template_file}"\
+        .format(region=args.bucket_region, PipelineName=args.pipeline_name, bucket_name=args.bucket, template_prefix=template_prefix, template_file=first_account_template_file)
+    cfn_one_cfn_stack_role_creation_url =\
+        "https://console.aws.amazon.com/cloudformation/home?region={region}#/stacks/new?stackName=DelegatedAccessStacksets&templateURL=https://{bucket_name}.s3.amazonaws.com/{template_prefix}/{template_file}"\
+        .format(region=args.bucket_region, bucket_name=args.bucket, template_prefix=template_prefix, template_file=cfn_stack_role_creation_template_file)

+    print("Assets successfully uploaded to {bucket} bucket".format(bucket=param_bucket))

-if validate_bucket(bucket_name=param_bucket):
-    # upload firstStageAccount_template
-    uploadFileToS3(bucket=param_bucket, filename=output_first_account_template_path, region=args.bucket_region, prefix=template_prefix, object_name=first_account_template_file)
+    if validate_bucket(bucket_name=param_bucket):

-    # upload deployment_template
-    uploadFileToS3(bucket=param_bucket, filename=output_deployment_account_template_path, region=args.bucket_region, prefix=template_prefix, object_name=deployment_account_template_file)
+        # upload firstStageAccount_template
+        uploadFileToS3(bucket=param_bucket, filename=output_first_account_template_path, region=args.bucket_region, prefix=template_prefix, object_name=first_account_template_file)

-    # upload cfn stack role creation template
-    uploadFileToS3(bucket=param_bucket, filename=CFN_STACKS_ROLE_CREATION_TEMPLATE_PATH, region=args.bucket_region, prefix=template_prefix, object_name=cfn_stack_role_creation_template_file)
+        # upload deployment_template
+        uploadFileToS3(bucket=param_bucket, filename=output_deployment_account_template_path, region=args.bucket_region, prefix=template_prefix, object_name=deployment_account_template_file)

-    print()
-    print_green('NEXT STEPS')
-    print('# 1. Deploy deployment account assets in Deployment account with ID {dep_account_id} using the following URL: {dep_dep_url}'.format(dep_account_id=deployment_account_id, dep_dep_url=cfn_one_click_deployment_url))
-    print('# 2. Deploy first stage account assets in Development account with ID {dev_account_id} using the following URL: {dev_dep_url}'.format(dev_account_id=development_account_id, dev_dep_url=cfn_one_click_first_stage_url))
-    print('# 3 (optional). If your account is not configured for CFN Stack Sets operation in self managed mode, deploy CloudFormation StackSet role creation in each environment account ({dev_account_id}, {pre_account_id}, {pro_account_id}) using the following URL: {dev_dep_url}'\
-        .format(dev_account_id=development_account_id, pre_account_id=preproduction_account_id, pro_account_id=production_account_id, dev_dep_url=cfn_one_cfn_stack_role_creation_url))
-else:
-    print()
-    print_yellow('MANUAL ACTION REQUIRED - read below')
-    print('Bucket {bucket_name} doesn''t have static webhosting enabled which is required to deploy CloudFormation templates directly from S3'.format(bucket_name=param_bucket))
-    print('Solution lambda code files have been uploaded to your bucket so they can be used in the templates, but you will need to manually upload them in the CloudFormation console, files can be found under the workspace/ directory.')
\ No newline at end of file
+        # upload cfn stack role creation template
+        uploadFileToS3(bucket=param_bucket, filename=output_stack_role_creation_template_path, region=args.bucket_region, prefix=template_prefix, object_name=cfn_stack_role_creation_template_file)
+
+        print()
+        print_green('NEXT STEPS')
+        print('# 1. Deploy deployment account assets in Deployment account with ID {dep_account_id} using the following URL: {dep_dep_url}'.format(dep_account_id=deployment_account_id, dep_dep_url=cfn_one_click_deployment_url))
+        print('# 2. Deploy first stage account assets in Development account with ID {dev_account_id} using the following URL: {dev_dep_url}'.format(dev_account_id=development_account_id, dev_dep_url=cfn_one_click_first_stage_url))
+        print('# 3 (optional). If your account is not configured for CFN Stack Sets operation in self managed mode, deploy CloudFormation StackSet role creation in each environment account ({dev_account_id}, {pre_account_id}, {pro_account_id}) using the following URL: {dev_dep_url}'\
+            .format(dev_account_id=development_account_id, pre_account_id=preproduction_account_id, pro_account_id=production_account_id, dev_dep_url=cfn_one_cfn_stack_role_creation_url))
+    else:
+        print()
+        print_yellow('MANUAL ACTION REQUIRED - read below')
+        print('Bucket {bucket_name} doesn\'t have static webhosting enabled which is required to deploy CloudFormation templates directly from S3'.format(bucket_name=param_bucket))
+        print('Solution lambda code files have been uploaded to your bucket so they can be used in the templates, but you will need to manually upload them in the CloudFormation console, files can be found under the workspace/ directory.')
\ No newline at end of file
diff --git a/deployment/CFNStacks/AWSCloudFormationStackSetExecutionRole.yml b/deployment/CFNStacks/AWSCloudFormationStackSetExecutionRole.yml
index acbcbd7..052f318 100755
--- a/deployment/CFNStacks/AWSCloudFormationStackSetExecutionRole.yml
+++ b/deployment/CFNStacks/AWSCloudFormationStackSetExecutionRole.yml
@@ -16,16 +16,17 @@ Resources:
   ExecutionRole:
     Type: AWS::IAM::Role
     Properties:
-      RoleName: !Ref ExecutionRoleName
+      RoleName:
+        Ref: ExecutionRoleName
       AssumeRolePolicyDocument:
         Version: 2012-10-17
         Statement:
           - Effect: Allow
             Principal:
               AWS:
-                - !Ref AdministratorAccountId
+                - Ref: AdministratorAccountId
             Action:
               - sts:AssumeRole
       Path: /
       ManagedPolicyArns:
-        - !Sub arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess
+        - Fn::Sub: arn:${AWS::Partition}:iam::aws:policy/AdministratorAccess
diff --git a/deployment/CFNStacks/deploymentAccount_template.yaml b/deployment/CFNStacks/deploymentAccount_template.yaml
index 7be26f7..7201406 100644
--- a/deployment/CFNStacks/deploymentAccount_template.yaml
+++ b/deployment/CFNStacks/deploymentAccount_template.yaml
@@ -27,25 +27,10 @@ Parameters:
     Description: Region where QuickSight assets are hosted
     Type: String
     Default: "us-east-1"
-  SrcQSAdminRegion:
-    Description: Admin region for your QS source account where your users are hosted
-    Type: String
-    Default: "us-east-1"
-  DestQSAdminRegion:
-    Description: Admin region for your QS destination account where your users are hosted
-    Type: String
-    Default: "us-east-1"
-  QSUser:
-    Description: QS Username in Account where the assets will be created
-    Type: String
   AccountAdminARN:
     Description: IAM ARN that will be responsible for administering the Account (it will be able to manage the created KMS key for encryption). Eg your role/user arn
     Type: String
     Default: role/Administrator
-  Stage1Name:
-    Description: Name of the first stage in the pipeline, e.g. DEV
-    Type: String
-    Default: DEV
   Stage2Name:
     Description: Name of the second stage in the pipeline, e.g.
 PRE
     Type: String
@@ -146,6 +131,8 @@ Resources:
           Status: Enabled
       VersioningConfiguration:
         Status: Enabled
+      WebsiteConfiguration:
+        IndexDocument: index.html
       NotificationConfiguration:
         EventBridgeConfiguration:
           EventBridgeEnabled: true
@@ -229,6 +216,7 @@ Resources:
       Name:
         Ref: PipelineName
       PipelineType: V2
+      ExecutionMode: QUEUED
       RoleArn:
         Fn::GetAtt: CodePipelineRole.Arn
       ArtifactStore:
@@ -295,7 +283,7 @@ Resources:
               Ref: QuickSightRegion
             StackSetName:
               Fn::Sub: ${PipelineName}-QSSourceAssets
-            TemplatePath: SourceAssetsArtifact::QStemplate_CFN_SOURCE.yaml
+            TemplatePath: SourceAssetsArtifact::QS_assets_CFN_SOURCE.yaml
             Parameters:
               Fn::Sub: SourceAssetsArtifact::source_cfn_template_parameters_${Stage2Name}.txt
             InputArtifacts:
@@ -550,6 +538,58 @@ Resources:
               - Fn::Sub: "arn:aws:s3:::${PipelineS3BucketName}"
              - Fn::Sub: "arn:aws:s3:::${PipelineS3BucketName}/*"
            Sid: VisualEditor0
+          - Action:
+              - dynamodb:BatchGetItem
+              - dynamodb:BatchWriteItem
+              - dynamodb:ConditionCheckItem
+              - dynamodb:PutItem
+              - dynamodb:DescribeTable
+              - dynamodb:DeleteItem
+              - dynamodb:GetItem
+              - dynamodb:Scan
+              - dynamodb:Query
+              - dynamodb:UpdateItem
+            Effect: Allow
+            Resource:
+              - Fn::Sub: arn:aws:dynamodb:${AWS::Region}:${AWS::AccountId}:table/QSAssetParameters-${PipelineName}
+              - Fn::Sub: arn:aws:dynamodb:${AWS::Region}:${AWS::AccountId}:table/QSTrackedAssets-${PipelineName}
+  paramDDBTable:
+    Type: AWS::DynamoDB::Table
+    Properties:
+      AttributeDefinitions:
+        - AttributeName: StageName
+          AttributeType: S
+        - AttributeName: AssetType
+          AttributeType: S
+      KeySchema:
+        - AttributeName: StageName
+          KeyType: HASH
+        - AttributeName: AssetType
+          KeyType: RANGE
+      ProvisionedThroughput:
+        ReadCapacityUnits: 5
+        WriteCapacityUnits: 5
+      TableName:
+        Fn::Sub: QSAssetParameters-${PipelineName}
+
+  trackedAssetsTable:
+    Type: AWS::DynamoDB::Table
+    Properties:
+      AttributeDefinitions:
+        - AttributeName: AssetId
+          AttributeType: S
+        - AttributeName: AssetType
+          AttributeType: S
+      KeySchema:
+        - AttributeName: AssetId
+          KeyType: HASH
+        - AttributeName: AssetType
+          KeyType: RANGE
+      ProvisionedThroughput:
+        ReadCapacityUnits: 5
+        WriteCapacityUnits: 5
+      TableName:
+        Fn::Sub: QSTrackedAssets-${PipelineName}
 Outputs:
   Codepipeline:
     Description: Link to the codepipeline created to implement QuickSight CI/CD
diff --git a/deployment/CFNStacks/deploymentAccount_template_delegated.yaml b/deployment/CFNStacks/deploymentAccount_template_delegated.yaml
new file mode 100644
index 0000000..8695923
--- /dev/null
+++ b/deployment/CFNStacks/deploymentAccount_template_delegated.yaml
@@ -0,0 +1,634 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: CloudFormation to deploy code pipeline and auxiliary assets for the Guidance for Multi-account Environments on Amazon QuickSight (SO9402)
+
+Parameters:
+  DevelopmentAccountId:
+    Description: Account ID hosting the development environment
+    Type: String
+    AllowedPattern: "^[0-9]{12}"
+  PreProdAccountId:
+    Description: Account ID hosting the pre-production environment
+    Type: String
+    AllowedPattern: "^[0-9]{12}"
+  ProdAccountId:
+    Description: Account ID hosting the production environment
+    Type: String
+    AllowedPattern: "^[0-9]{12}"
+  PipelineS3BucketName:
+    Description: S3 Bucket to use for pipeline assets, could be an existing bucket, in that case make sure you change the CreateBucket parameter to false
+    Type: String
+    AllowedPattern: "^[0-9a-z\\.-]{3,63}"
+    Default: "qs-pipeline-bucket"
+  S3Region:
+    Description: Region where the S3 bucket will be hosted
+    Type: String
+    Default: "us-east-1"
+  QuickSightRegion:
+    Description: Region where QuickSight assets are hosted
+    Type: String
+    Default: "us-east-1"
+  SrcQSAdminRegion:
+    Description: Admin region for your QS source account where your users are hosted
+    Type: String
+    Default: "us-east-1"
+  DestQSAdminRegion:
+    Description: Admin region for your QS destination account where your users are hosted
+    Type: String
+    Default: "us-east-1"
+  QSUser:
+    Description: QS Username in Account where the assets will be created
+    Type: String
+  AccountAdminARN:
+    Description: IAM ARN that will be responsible for administering the Account (it will be able to manage the created KMS key for encryption). Eg your role/user arn
+    Type: String
+    Default: role/Administrator
+  Stage1Name:
+    Description: Name of the first stage in the pipeline, e.g. DEV
+    Type: String
+    Default: DEV
+  Stage2Name:
+    Description: Name of the second stage in the pipeline, e.g. PRE
+    Type: String
+    Default: PRE
+  Stage3Name:
+    Description: Name of the third stage in the pipeline, e.g. PRO
+    Type: String
+    Default: PRO
+  AssumeRoleExtId:
+    Description: IAM external ID to be used when assuming the IAM role in the development account. See https://a.co/47mgPwV for more details
+    Type: String
+    Default: qsdeppipeline
+  PipelineName:
+    Description: "Name for the Code Pipeline that will be created"
+    Type: String
+    Default: QSCICDPipeline
+  ApprovalEmail:
+    Description: "Email that you want to be notified for the prod approval phase"
+    Type: String
+    Default: user@domain.com
+  AdministrationRoleName:
+    Type: String
+    Default: AWSCloudFormationStackSetAdministrationRole
+    Description: "The name of the administration role. Defaults to 'AWSCloudFormationStackSetAdministrationRole'."
+  ExecutionRoleName:
+    Type: String
+    Default: AWSCloudFormationStackSetExecutionRole
+    Description: "The name of the execution role that can assume this role. Defaults to 'AWSCloudFormationStackSetExecutionRole'."
+  CreateBucket:
+    Description: "Decide if pipeline bucket should be created"
+    Type: String
+    AllowedValues:
+      - "true"
+      - "false"
+    Default: "true"
+    ConstraintDescription: "You need to specify true or false"
+  CreateAdmRole:
+    Description: "Whether or not the Admin role for self managed stack set operations should be created, choose NO if your admin account already has this role created, more info here https://a.co/e6M6aMV.
 Remember that you will need to deploy the provided AWSCloudFormationStackSetExecutionRole.yml stack in ALL the stage accounts"
+    Type: String
+    AllowedValues:
+      - "true"
+      - "false"
+    Default: "true"
+    ConstraintDescription: "You need to specify true or false"
+
+
+Conditions:
+  CreateBucketCondition:
+    Fn::Equals: [Ref: CreateBucket, "true"]
+  CreateAdmRoleCondition:
+    Fn::Equals: [Ref: CreateAdmRole, "true"]
+
+Resources:
+  AdministrationRole:
+    Type: AWS::IAM::Role
+    Condition: CreateAdmRoleCondition
+    Properties:
+      RoleName:
+        Ref: AdministrationRoleName
+      AssumeRolePolicyDocument:
+        Version: 2012-10-17
+        Statement:
+          - Effect: Allow
+            Principal:
+              Service: cloudformation.amazonaws.com
+            Action:
+              - sts:AssumeRole
+      Path: /
+      Policies:
+        - PolicyName:
+            Fn::Sub: AssumeRole-${ExecutionRoleName}
+          PolicyDocument:
+            Version: 2012-10-17
+            Statement:
+              - Effect: Allow
+                Action:
+                  - sts:AssumeRole
+                Resource:
+                  - Fn::Sub: 'arn:*:iam::${DevelopmentAccountId}:role/${ExecutionRoleName}'
+                  - Fn::Sub: 'arn:*:iam::${PreProdAccountId}:role/${ExecutionRoleName}'
+                  - Fn::Sub: 'arn:*:iam::${ProdAccountId}:role/${ExecutionRoleName}'
+  pipelineS3Bucket:
+    Type: AWS::S3::Bucket
+    Condition: CreateBucketCondition
+    Properties:
+      AccessControl: Private
+      BucketName:
+        Ref: PipelineS3BucketName
+      PublicAccessBlockConfiguration:
+        BlockPublicAcls: true
+        IgnorePublicAcls: true
+        BlockPublicPolicy: true
+        RestrictPublicBuckets: true
+      LifecycleConfiguration:
+        Rules:
+          - NoncurrentVersionExpiration:
+              NoncurrentDays: 30
+            Status: Enabled
+      VersioningConfiguration:
+        Status: Enabled
+      WebsiteConfiguration:
+        IndexDocument: index.html
+      NotificationConfiguration:
+        EventBridgeConfiguration:
+          EventBridgeEnabled: true
+  approvalTopic:
+    Type: AWS::SNS::Topic
+    Properties:
+      DisplayName:
+        Fn::Sub: ${PipelineName}-Topic
+      KmsMasterKeyId:
+        Ref: snsEncryptionKey
+      Subscription:
+        - Endpoint:
+            Ref: ApprovalEmail
+          Protocol: email
+  snsEncryptionKey:
+    Type: AWS::KMS::Key
+    Properties:
+      Description:
+        Fn::Sub: KMS key used to encrypt content of messages delivered to QS ${PipelineName} CI/CD pipeline
+      Enabled: True
+      EnableKeyRotation: True
+      KeyPolicy:
+        Version: '2012-10-17'
+        Id: allow-sns
+        Statement:
+          - Sid: Allow access through SNS for all principals in the account that are authorized to use SNS
+            Action:
+              - kms:Decrypt
+              - kms:GenerateDataKey*
+              - kms:CreateGrant
+              - kms:ListGrants
+              - kms:DescribeKey
+            Condition:
+              StringEquals:
+                kms:CallerAccount:
+                  Ref: AWS::AccountId
+                kms:ViaService: sns.us-east-1.amazonaws.com
+            Effect: Allow
+            Principal:
+              AWS:
+                Fn::GetAtt: CodePipelineRole.Arn
+            Resource: '*'
+          - Sid: Allow administration of the key
+            Effect: Allow
+            Action:
+              - kms:Create*
+              - kms:Describe*
+              - kms:Enable*
+              - kms:List*
+              - kms:Put*
+              - kms:Update*
+              - kms:Revoke*
+              - kms:Disable*
+              - kms:Get*
+              - kms:Delete*
+              - kms:ScheduleKeyDeletion
+              - kms:CancelKeyDeletion
+            Principal:
+              AWS:
+                Ref: AccountAdminARN
+            Resource: '*'
+          - Sid: Allow direct access to key metadata to the account
+            Action:
+              - kms:Describe*
+              - kms:Get*
+              - kms:List*
+              - kms:RevokeGrant
+            Effect: Allow
+            Principal:
+              AWS:
+                Fn::Sub: arn:aws:iam::${AWS::AccountId}:root
+            Resource: '*'
+      KeySpec: SYMMETRIC_DEFAULT
+      KeyUsage: ENCRYPT_DECRYPT
+
+  codepipeline:
+    Type: AWS::CodePipeline::Pipeline
+    DependsOn:
+      - CodePipelineRole
+    Properties:
+      Name:
+        Ref: PipelineName
+      PipelineType: V2
+      ExecutionMode: QUEUED
+      RoleArn:
+        Fn::GetAtt: CodePipelineRole.Arn
+      ArtifactStore:
+        Type: S3
+        Location:
+          Fn::If: [CreateBucketCondition, 
+      Stages:
+        - Name: Source
+          Actions:
+            - Name: Source_Assets
+              ActionTypeId:
+                Category: Source
+                Owner: AWS
+                Provider: S3
+                Version: "1"
+              OutputArtifacts:
+                - Name: SourceAssetsArtifact
+              Configuration:
+                PollForSourceChanges: 'false'
+                S3Bucket:
+                  Fn::If: [CreateBucketCondition, Ref: pipelineS3Bucket, Ref: PipelineS3BucketName]
+                S3ObjectKey:
+                  Fn::Sub: ${PipelineName}/CFNTemplates/SOURCE_assets_CFN.zip
+              RunOrder: 1
+              Region:
+                Ref: S3Region
+              Namespace: SourceVariablesSource
+            - Name: Dest_Assets
+              ActionTypeId:
+                Category: Source
+                Owner: AWS
+                Provider: S3
+                Version: "1"
+              OutputArtifacts:
+                - Name: DestAssetsArtifact
+              Configuration:
+                PollForSourceChanges: 'false'
+                S3Bucket:
+                  Fn::If: [CreateBucketCondition, Ref: pipelineS3Bucket, Ref: PipelineS3BucketName]
+                S3ObjectKey:
+                  Fn::Sub: ${PipelineName}/CFNTemplates/DEST_assets_CFN.zip
+              RunOrder: 2
+              Region:
+                Ref: S3Region
+              Namespace: SourceVariablesDest
+        - Name: Deploy
+          Actions:
+            - Name:
+                Fn::Sub: Deploy_${Stage2Name}_Source_Assets
+              ActionTypeId:
+                Category: Deploy
+                Owner: AWS
+                Provider: CloudFormationStackSet
+                Version: "1"
+              Configuration:
+                DeploymentTargets:
+                  Ref: Stage1OUId
+                CallAs: DELEGATED_ADMIN
+                PermissionModel: SELF_MANAGED
+                AdministrationRoleArn:
+                  Fn::Sub: arn:aws:iam::${AWS::AccountId}:role/${AdministrationRoleName}
+                ExecutionRoleName:
+                  Ref: ExecutionRoleName
+                Regions:
+                  Ref: QuickSightRegion
+                StackSetName:
+                  Fn::Sub: ${PipelineName}-QSSourceAssets
+                TemplatePath: SourceAssetsArtifact::QS_assets_CFN_SOURCE.yaml
+                Parameters:
+                  Fn::Sub: SourceAssetsArtifact::source_cfn_template_parameters_${Stage2Name}.txt
+              InputArtifacts:
+                - Name: SourceAssetsArtifact
+              RunOrder: 1
+              Region:
+                Ref: "AWS::Region"
+              Namespace:
+                Fn::Sub: DeployVariablesSource${Stage2Name}
+            - Name:
+                Fn::Sub: Deploy_${Stage2Name}_Dest_Assets
+              ActionTypeId:
+                Category: Deploy
+                Owner: AWS
+                Provider: CloudFormationStackSet
+                Version: "1"
+              Configuration:
+                DeploymentTargets:
+                  Ref: Stage2OUId
+                PermissionModel: SELF_MANAGED
+                AdministrationRoleArn:
+                  Fn::Sub: arn:aws:iam::${AWS::AccountId}:role/${AdministrationRoleName}
+                ExecutionRoleName:
+                  Ref: ExecutionRoleName
+                CallAs: DELEGATED_ADMIN
+                Regions:
+                  Ref: QuickSightRegion
+                StackSetName:
+                  Fn::Sub: ${PipelineName}-QSDestAssets
+                TemplatePath: DestAssetsArtifact::QS_assets_CFN_DEST.yaml
+                Parameters:
+                  Fn::Sub: DestAssetsArtifact::dest_cfn_template_parameters_${Stage2Name}.txt
+              InputArtifacts:
+                - Name: DestAssetsArtifact
+              RunOrder: 2
+              Region:
+                Ref: "AWS::Region"
+              Namespace:
+                Fn::Sub: DeployVariablesDest${Stage2Name}
+        - Name: Approval-Stage
+          Actions:
+            - Name: Manual-Approval
+              ActionTypeId:
+                Category: Approval
+                Owner: AWS
+                Provider: Manual
+                Version: "1"
+              Configuration:
+                NotificationArn:
+                  Ref: approvalTopic
+              Region:
+                Ref: "AWS::Region"
+              RunOrder: 1
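Each stack set deploy action above reads its `Parameters`/`ParameterOverrides` from a per-stage file inside the source artifact (e.g. `source_cfn_template_parameters_PRE.txt`). As a minimal sketch of how such a file can be produced — the parameter keys below are examples only, not the exact keys of any generated template:

```python
# Illustrative only: the per-stage parameter files consumed by the deploy
# actions are a JSON list of ParameterKey/ParameterValue pairs.
import json

parameters = [
    {"ParameterKey": "SourceAccountID", "ParameterValue": "111122223333"},
    {"ParameterKey": "QSUser", "ParameterValue": "my-qs-user"},
]

with open("source_cfn_template_parameters_PRE.txt", "w") as f:
    json.dump(parameters, f, indent=2)
```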
+        - Name:
+            Fn::Sub: Deploy-${Stage3Name}
+          Actions:
+            - Name:
+                Fn::Sub: Deploy_${Stage3Name}_Source_Assets
+              ActionTypeId:
+                Category: Deploy
+                Owner: AWS
+                Provider: CloudFormationStackInstances
+                Version: "1"
+              Configuration:
+                DeploymentTargets:
+                  Ref: Stage2OUId
+                ParameterOverrides:
+                  Fn::Sub: SourceAssetsArtifact::source_cfn_template_parameters_${Stage3Name}.txt
+                Regions:
+                  Ref: QuickSightRegion
+                CallAs: DELEGATED_ADMIN
+                StackSetName:
+                  Fn::Sub: ${PipelineName}-QSSourceAssets
+              InputArtifacts:
+                - Name: SourceAssetsArtifact
+              Region:
+                Ref: "AWS::Region"
+              RunOrder: 1
+            - Name:
+                Fn::Sub: Deploy_${Stage3Name}_Dest_Assets
+              ActionTypeId:
+                Category: Deploy
+                Owner: AWS
+                Provider: CloudFormationStackInstances
+                Version: "1"
+              Configuration:
+                DeploymentTargets:
+                  Ref: Stage3OUId
+                ParameterOverrides:
+                  Fn::Sub: DestAssetsArtifact::dest_cfn_template_parameters_${Stage3Name}.txt
+                CallAs: DELEGATED_ADMIN
+                Regions:
+                  Ref: QuickSightRegion
+                StackSetName:
+                  Fn::Sub: ${PipelineName}-QSDestAssets
+              InputArtifacts:
+                - Name: DestAssetsArtifact
+              RunOrder: 2
+
+  EventBridgeRuleSourceAssets:
+    Type: AWS::Events::Rule
+    DependsOn:
+      - codepipeline
+      - eventBridgeRole
+    Properties:
+      Description: >-
+        Amazon EventBridge rule to automatically start your pipeline when a change occurs in the Amazon S3 assets folder. Deleting this may prevent changes from being detected in the pipeline
+      EventBusName: default
+      EventPattern:
+        source:
+          - aws.s3
+        detail-type:
+          - Object Created
+        detail:
+          bucket:
+            name:
+              - Fn::If: [CreateBucketCondition, Ref: pipelineS3Bucket, Ref: PipelineS3BucketName]
+          object:
+            key:
+              - Fn::Sub: ${PipelineName}/CFNTemplates/DEST_assets_CFN.zip
+              - Fn::Sub: ${PipelineName}/CFNTemplates/SOURCE_assets_CFN.zip
+      Name:
+        Fn::Sub: ${PipelineName}S3SourceRule
+      State: ENABLED
+      Targets:
+        - Arn:
+            Fn::Sub: arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:${codepipeline}
+          Id:
+            Ref: codepipeline
+          RoleArn:
+            Fn::GetAtt: eventBridgeRole.Arn
+
+  eventBridgeRole:
+    Type: AWS::IAM::Role
+    Properties:
+      AssumeRolePolicyDocument:
+        Version: '2012-10-17'
+        Statement:
+          - Action: sts:AssumeRole
+            Effect: Allow
+            Principal:
+              Service: events.amazonaws.com
+      Policies:
+        - PolicyName:
+            Fn::Sub: start-pipeline-execution-${codepipeline}
+          PolicyDocument:
+            Version: "2012-10-17"
+            Statement:
+              - Effect: "Allow"
+                Action:
+                  - codepipeline:StartPipelineExecution
+                Resource:
+                  - Fn::Sub: arn:aws:codepipeline:${AWS::Region}:${AWS::AccountId}:${codepipeline}
+
+  CodePipelineRole:
+    Type: AWS::IAM::Role
+    Properties:
+      AssumeRolePolicyDocument:
+        Version: '2012-10-17'
+        Statement:
+          - Action: sts:AssumeRole
+            Effect: Allow
+            Principal:
+              Service: codepipeline.amazonaws.com
+
+  CodePipelineRolePolicy:
+    Type: AWS::IAM::RolePolicy
+    Properties:
+      PolicyDocument:
+        Version: "2012-10-17"
+        Statement:
+          - Action:
+              - iam:PassRole
+              - iam:GetAccountAuthorizationDetails
+              - organizations:ListAccounts
+              - organizations:DescribeAccount
+              - organizations:DescribeOrganization
+              - organizations:ListPoliciesForTarget
+              - organizations:ListDelegatedAdministrators
+              - organizations:ListDelegatedServicesForAccount
+            Resource: '*'
+            Effect: Allow
+            Condition:
+              StringEqualsIfExists:
+                iam:PassedToService:
+                  - cloudformation.amazonaws.com
+                  - elasticbeanstalk.amazonaws.com
+                  - ec2.amazonaws.com
+                  - ecs-tasks.amazonaws.com
+          - Action:
+              - iam:PassRole
+            Condition:
+              StringEqualsIfExists:
+                iam:PassedToService:
+                  - cloudformation.amazonaws.com
+            Effect: Allow
+            Resource:
+              - Fn::GetAtt: CodePipelineRole.Arn
+              - Fn::Sub: arn:aws:iam::${AWS::AccountId}:role/${AdministrationRoleName}
+          - Action:
+              - cloudwatch:*
+            Effect: Allow
+            Resource: '*'
+          - Action:
+              - s3:GetObject*
+              - s3:PutObject*
+            Effect: Allow
+            Resource:
+              Fn::Sub: "arn:aws:s3:::${PipelineS3BucketName}/*"
+          - Action:
+              - sns:Publish
+            Effect: Allow
+            Resource:
+              Ref: approvalTopic
+          - Action:
+              - s3:ListBucket
+              - s3:PutBucket*
+              - s3:GetBucket*
+            Effect: Allow
+            Resource:
+              Fn::Sub: "arn:aws:s3:::${PipelineS3BucketName}"
+          - Action:
+              - cloudformation:CreateStack
+              - cloudformation:DeleteStack
+              - cloudformation:DescribeStacks
+              - cloudformation:UpdateStack
+              - cloudformation:CreateChangeSet
+              - 
cloudformation:DeleteChangeSet + - cloudformation:DescribeChangeSet + - cloudformation:ExecuteChangeSet + - cloudformation:SetStackPolicy + - cloudformation:ValidateTemplate + - cloudformation:DescribeStackSet + - cloudformation:ListStackInstances + Effect: Allow + Resource: + - "*" + PolicyName: QSCICDPipelinePolicy + RoleName: + Ref: CodePipelineRole + + + S3AssumeRole: + Type: AWS::IAM::Role + Properties: + RoleName: + Fn::Sub: DevAccountS3AccessRole-QSCICD-${PipelineName} + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + AWS: + Fn::Sub: "arn:aws:iam::${DevelopmentAccountId}:root" + Condition: + StringEquals: + sts:ExternalId: + Ref: AssumeRoleExtId + Policies: + - + PolicyName: "DevAccountWriteToResourceBucket" + PolicyDocument: + Version: "2012-10-17" + Statement: + - Action: + - s3:PutObject + - s3:ListBucket + - s3:PutObjectAcl + - s3:GetObject + - s3:GetObjectAcl + - s3:GetBucketLocation + Effect: Allow + Resource: + - Fn::Sub: "arn:aws:s3:::${PipelineS3BucketName}" + - Fn::Sub: "arn:aws:s3:::${PipelineS3BucketName}/*" + Sid: VisualEditor0 + - Action: + - dynamodb:BatchGetItem + - dynamodb:BatchWriteItem + - dynamodb:ConditionCheckItem + - dynamodb:PutItem + - dynamodb:DescribeTable + - dynamodb:DeleteItem + - dynamodb:GetItem + - dynamodb:Scan + - dynamodb:Query + - dynamodb:UpdateItem + Effect: Allow + Resource: + - Fn::Sub: arn:aws:dynamodb:${AWS::Region}:${AWS::AccountId}:table/QSAssetParameters-${PipelineName} + - Fn::Sub: arn:aws:dynamodb:${AWS::Region}:${AWS::AccountId}:table/QSTrackedAssets-${PipelineName} + paramDDBTable: + Type: AWS::DynamoDB::Table + Properties: + AttributeDefinitions: + - AttributeName: StageName + AttributeType: S + - AttributeName: AssetType + AttributeType: S + KeySchema: + - AttributeName: StageName + KeyType: HASH + - AttributeName: AssetType + KeyType: RANGE + ProvisionedThroughput: + ReadCapacityUnits: 5 + WriteCapacityUnits: 5 + TableName: + Fn::Sub: QSAssetParameters-${PipelineName} + + trackedAssetsTable: + Type: AWS::DynamoDB::Table + Properties: + AttributeDefinitions: + - AttributeName: AssetId + AttributeType: S + - AttributeName: AssetType + AttributeType: S + KeySchema: + - AttributeName: AssetId + KeyType: HASH + - AttributeName: AssetType + KeyType: RANGE + ProvisionedThroughput: + ReadCapacityUnits: 5 + WriteCapacityUnits: 5 + TableName: + Fn::Sub: QSTrackedAssets-${PipelineName} +Outputs: + Codepipeline: + Description: Link to the codepipeline created to implement QuickSight CI/CD + Value: + Fn::Sub: "https://${AWS::Region}.console.aws.amazon.com/codesuite/codepipeline/pipelines/${PipelineName}/view" \ No newline at end of file diff --git a/deployment/CFNStacks/firstStageAccount_template.yaml b/deployment/CFNStacks/firstStageAccount_template.yaml index 748ed36..fded7a6 100755 --- a/deployment/CFNStacks/firstStageAccount_template.yaml +++ b/deployment/CFNStacks/firstStageAccount_template.yaml @@ -14,9 +14,6 @@ Parameters: Description: Ext ID to be used in when assuming the IAM role in the development account Type: String - DashboardId: - Description: Dashboard ID in development you want to track changes for - Type: String DeploymentAccountId: AllowedPattern: ^[0-9]{12} Description: Account ID used for the deployment pipelines @@ -29,10 +26,7 @@ Parameters: DeploymentS3Region: Default: us-east-1 Description: Region where the deployment (CI/CD) bucket resides - Type: String - DestQSUser: - Description: Dest stage username to use to share the created QS assets with 
-    Type: String
+    Type: String
   LayerCodeKey:
     AllowedPattern: ^[0-9a-zA-Z\/\-_]+\.zip
     Description: Key within S3 Bucket that contains the zipped code for the lambda
@@ -44,10 +38,6 @@ Parameters:
     Description: Name of the Code Pipeline whose source assets this lambda will be
       contributing to
     Type: String
-  QuickSightRegion:
-    Default: us-east-1
-    Description: Region where QuickSight assets are hosted
-    Type: String
   RemapDS:
     AllowedValues:
     - true
@@ -57,6 +47,15 @@ Parameters:
       the dashboard datasets (when using templates) or supported properties when using
      Assets As Bundle (more info here https://a.co/jeHZkOr)
     Type: String
+  GenerateNestedStacks:
+    AllowedValues:
+    - true
+    - false
+    Default: true
+    Description: Whether or not to generate CFN nested stacks to be used by CodePipeline.
+      CAUTION, this setting helps circumvent the potential issue of reaching the max template size (1MB)
+      but can also break the resulting template; disable it if you experience any issues with CFN during pipeline deployments
+    Type: String
   ReplicationMethod:
     AllowedValues:
     - TEMPLATE
@@ -75,9 +74,6 @@ Parameters:
     AllowedPattern: ^[0-9a-z\.-]{3,63}
     Description: S3 Bucket containing the code
     Type: String
-  SourceQSUser:
-    Description: Source stage username to use to retrieve QS assets
-    Type: String
   StageNames:
     Default: DEV, PRE, PRO
     Description: List of comma-separated names of the stages that your pipeline will
@@ -98,6 +94,8 @@ Resources:
       ManagedPolicyArns:
       - arn:aws:iam::aws:policy/AWSLambdaExecute
       - arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess
+      - arn:aws:iam::aws:policy/AWSQuickSightAssetBundleImportPolicy
+      - arn:aws:iam::aws:policy/AWSQuickSightAssetBundleExportPolicy
       Policies:
       - PolicyDocument:
           Statement:
@@ -110,13 +108,6 @@ Resources:
             Resource:
            - Fn::Sub: arn:aws:quicksight:*:${AWS::AccountId}:dataset/*
             Sid: 1
-          - Action:
-            - quicksight:StartAssetBundleExportJob
-            - quicksight:DescribeAssetBundleExportJob
-            Effect: Allow
-            Resource:
-            - Fn::Sub: arn:aws:quicksight:*:${AWS::AccountId}:asset-bundle-export-job/*
-            Sid: 2
           - Action: sts:AssumeRole
             Effect: Allow
             Resource:
@@ -186,28 +177,24 @@ Resources:
       Environment:
         Variables:
           ASSUME_ROLE_EXT_ID:
-            Ref: AssumeRoleExtId
-          DASHBOARD_ID:
-            Ref: DashboardId
+            Ref: AssumeRoleExtId
          DEPLOYMENT_ACCOUNT_ID:
            Ref: DeploymentAccountId
          DEPLOYMENT_S3_BUCKET:
            Ref: DeploymentS3Bucket
          DEPLOYMENT_S3_REGION:
-            Ref: DeploymentS3Region
-          DEST_QS_USER:
-            Ref: DestQSUser
+            Ref: DeploymentS3Region
          MODE: INITIALIZE
          PIPELINE_NAME:
            Ref: PipelineName
          REMAP_DS:
            Ref: RemapDS
+          GENERATE_NESTED_STACKS:
+            Ref: GenerateNestedStacks
          REPLICATION_METHOD:
            Ref: ReplicationMethod
          SOURCE_AWS_ACCOUNT_ID:
-            Ref: AWS::AccountId
-          SOURCE_QS_USER:
-            Ref: SourceQSUser
+            Ref: AWS::AccountId
          STAGES_NAMES:
            Ref: StageNames
          FunctionName:
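Since `GenerateNestedStacks` is the mechanism this patch uses to avoid CloudFormation's 1 MB template limit, a minimal illustration of the pattern may help reviewers. The sketch below is hypothetical (the helper name, bucket and file names are made up) and is not the synthesizer's actual code:

```python
# Minimal sketch of the nested-stack pattern: a small parent template that
# delegates each asset group to a child template stored in S3.
import yaml

def build_parent_template(child_template_urls: dict) -> dict:
    """child_template_urls maps a CFN logical ID to the S3 URL of a child template."""
    parent = {
        'AWSTemplateFormatVersion': '2010-09-09',
        'Resources': {}
    }
    for logical_id, url in child_template_urls.items():
        parent['Resources'][logical_id] = {
            'Type': 'AWS::CloudFormation::Stack',
            'Properties': {
                'TemplateURL': url,  # child template uploaded to S3 beforehand
            }
        }
    return parent

parent = build_parent_template({
    'DataSourcesStack': 'https://my-bucket.s3.amazonaws.com/datasources.yaml',
    'DataSetsStack': 'https://my-bucket.s3.amazonaws.com/datasets.yaml',
})
print(yaml.dump(parent))
```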
diff --git a/source/lambda/qs_assets_CFN_synthesizer/createTemplateFromAnalysis.py b/source/lambda/qs_assets_CFN_synthesizer/createTemplateFromAnalysis.py
index 5ddf0fa..36996b3 100644
--- a/source/lambda/qs_assets_CFN_synthesizer/createTemplateFromAnalysis.py
+++ b/source/lambda/qs_assets_CFN_synthesizer/createTemplateFromAnalysis.py
@@ -5,7 +5,7 @@
 import time
 import copy
 from zipfile import ZipFile
 import os
 import boto3
 from botocore.exceptions import ClientError
 from helpers.datasets import QSDataSetDef
@@ -14,26 +14,29 @@ from helpers.datasets import ImportMode
 from datetime import datetime
 from dateutil.relativedelta import relativedelta
+from dateutil import tz
 from urllib.request import urlretrieve
 
-now = datetime.now()
+utc = tz.gettz('UTC')
+utc_now = datetime.now(tz=utc)
 
-SOURCE_AWS_ACCOUNT_ID = os.environ['SOURCE_AWS_ACCOUNT_ID']
+FIRST_STAGE_ACCOUNT_ID = os.environ['SOURCE_AWS_ACCOUNT_ID']
 DEPLOYMENT_ACCOUNT_ID = os.environ['DEPLOYMENT_ACCOUNT_ID']
-SOURCE_TEMPLATE_ID = ''
 AWS_REGION = os.environ['AWS_REGION']
 DEPLOYMENT_S3_BUCKET = os.environ['DEPLOYMENT_S3_BUCKET']
 DEPLOYMENT_S3_REGION = os.environ['DEPLOYMENT_S3_REGION']
 ASSUME_ROLE_EXT_ID = os.environ['ASSUME_ROLE_EXT_ID']
 ANALYSIS_ID = ''
-DASHBOARD_ID = os.environ['DASHBOARD_ID']
-SOURCE_QS_USER = os.environ['SOURCE_QS_USER']
-DEST_QS_USER = os.environ['DEST_QS_USER']
 STAGES_NAMES = os.environ['STAGES_NAMES']
 REPLICATION_METHOD = os.environ['REPLICATION_METHOD']
+GENERATE_NESTED_STACKS = os.environ['GENERATE_NESTED_STACKS']
 REMAP_DS = os.environ['REMAP_DS']
 PIPELINE_NAME = os.environ['PIPELINE_NAME'] if 'PIPELINE_NAME' in os.environ else ''
-MODE = os.environ['MODE'] if 'MODE' in os.environ else 'INITIALIZE'
+MODE = os.environ['MODE'] if 'MODE' in os.environ else 'INITIALIZE'
+PARAMETER_DEFINITION_TABLE_NAME = 'QSAssetParameters-{pipelineName}'.format(pipelineName=PIPELINE_NAME)
+TRACKED_ASSETS_TABLE_NAME = 'QSTrackedAssets-{pipelineName}'.format(pipelineName=PIPELINE_NAME)
+CONFIGURATION_FILES_PREFIX = '{pipeline_name}/ConfigFiles'.format(pipeline_name=PIPELINE_NAME)
+ASSETS_FILES_PREFIX = '{pipeline_name}/CFNTemplates'.format(pipeline_name=PIPELINE_NAME)
+
 DEPLOYMENT_DEV_ACCOUNT_ROLE_ARN = 'arn:aws:iam::{deployment_account_id}:role/DevAccountS3AccessRole-QSCICD-{pipeline_name}'.format(deployment_account_id=DEPLOYMENT_ACCOUNT_ID, pipeline_name=PIPELINE_NAME)
 
 OUTPUT_DIR = '/tmp/output/'
@@ -48,40 +54,43 @@ qs = boto3.client('quicksight', region_name=AWS_REGION)
 
-def generateQSTemplateCFN(analysisDefObj:QSAnalysisDef):
+def generateQSTemplateCFN(analysisDefObj:QSAnalysisDef, appendContent:dict):
     """Function that generates a Cloudformation AWS::QuickSight::Template resource https://a.co/7A8bfh7 synthesized from a given analysisName
 
     Parameters:
 
     analysisDefObj (QSAnalysisDef): Analysis name that will be templated
+    appendContent(dict): Dictionary that represents the CFN template object (already built by other methods) where we want to append elements
 
-    Returns:
 
-    dict: yaml_template Object that represents the synthesized CFN template
-    str: templateId String that represents the templateId of the synthesized template
+    Returns:
 
+    dict: appendContent Object that represents the synthesized CFN template
 
     Example:
 
    >>> generateQSTemplateCFN('Analysis Name', {'Dataset1': 'DatasetPlaceholder1', 'Dataset2': 'DatasetPlaceholder2'}, 'Analysis ARN')
 
     """
 
-    TEMPLATE_VERSION = 'QS_CI_CD_TEMPLATE_{suffix}'.format(suffix=now.strftime('%d-%m-%y-%H-%M-%S'))
+    template_version = 'QS_CI_CD_TEMPLATE_ANALYSIS_{analysis_id}_{suffix}'.format(suffix=utc_now.strftime('%d-%m-%y-%H-%M-%S'), analysis_id=analysisDefObj.id)
+
+    if appendContent is None:
+        print("Append content is None")
+        raise ValueError("Error in createTemplateFromAnalysis:generateQSTemplateCFN, Append content is None")
 
-    with open('resources/template_CFN_skel.yaml', 'r') as file:
+    with open('resources/template_resource_CFN_skel.yaml', 'r') as file:
         yaml_template = yaml.safe_load(file)
 
-    template_properties = yaml_template['Resources']['CICDQSTemplate']['Properties']
-    analysis_name_sanitized = analysisDefObj.name.replace(' ', '-')
-    templateId = '{analysis_name}-template'.format(analysis_name=analysis_name_sanitized)
+    template_properties = yaml_template['Properties']
 
     analysis_id = analysisDefObj.id
 
     # properties in template
+    templateId = analysisDefObj.TemplateId
+    templateCFNResourceId = 'Template{analysis_cfn_id}'.format(analysis_cfn_id=analysisDefObj.CFNId)
     template_properties['SourceEntity']['SourceAnalysis']['Arn']['Fn::Sub'] = template_properties['SourceEntity']['SourceAnalysis']['Arn']['Fn::Sub'].replace('{analysis_id}', analysis_id)
     template_properties['TemplateId'] = templateId
     template_properties['Name'] = 'CI CD Template for analysis {name}'.format(name=analysisDefObj.name)
-    template_properties['VersionDescription'] = TEMPLATE_VERSION
+    template_properties['VersionDescription'] = template_version
 
     # set up dataset references
@@ -101,7 +110,9 @@ def generateQSTemplateCFN(analysisDefObj:QSAnalysisDef):
 
     template_properties['SourceEntity']['SourceAnalysis']['DataSetReferences'] = dataset_ref_list
 
-    return yaml_template, templateId
+    appendContent['Resources'][templateCFNResourceId] = yaml_template
+
+    return appendContent
 
 
 def generateDataSourceObject(datasourceId:str, datasourceIndex:int):
@@ -111,7 +122,7 @@ def generateDataSourceObject(datasourceId:str, datasourceIndex:int):
 
     dataSourceDefObj = {}
     DSparameters ={}
 
-    ret = qs.describe_data_source(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSourceId=datasourceId)
+    ret = qs.describe_data_source(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSourceId=datasourceId)
     dsType = ret['DataSource']['Type']
     datasourceName = ret['DataSource']['Name']
     datasourceArn = ret['DataSource']['Arn']
@@ -123,7 +134,7 @@ def generateDataSourceObject(datasourceId:str, datasourceIndex:int):
             DSparameters['Bucket'] = ret['DataSource']['DataSourceParameters']['S3Parameters']['ManifestFileLocation']['Bucket']
             DSparameters['Key'] = ret['DataSource']['DataSourceParameters']['S3Parameters']['ManifestFileLocation']['Key']
         else:
-            raise ValueError("Error in createTemplateFromAnalysis:generateDataSourceObject, S3 datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} has no DataSourceParameters. S3 datasources need to use a manifest file stored in S3). Cannot proceed further. Consider using REMAP_DS: ''TRUE''".format(datasource_name=datasourceName, index=datasourceIndex, type=dsType, datasource_id=datasourceId))
+            raise ValueError("Error in createTemplateFromAnalysis:generateDataSourceObject, S3 datasource {datasource_name} (datasource:{datasource_id}, type {type}) with index {index} has no DataSourceParameters (S3 datasources need to use a manifest file stored in S3). Cannot proceed further. 
Consider using REMAP_DS: ''TRUE''".format(datasource_name=datasourceName, index=datasourceIndex, type=dsType, datasource_id=datasourceId)) if dsType == SourceType.ATHENA.name: DSparameters['WorkGroup'] = ret['DataSource']['DataSourceParameters']['AthenaParameters']['WorkGroup'] @@ -132,7 +143,7 @@ def generateDataSourceObject(datasourceId:str, datasourceIndex:int): if dsType in RDMBS_DS: if 'SecretArn' not in ret['DataSource']: - raise ValueError("Datasource {datasource_name} (ID {datasource_id}) is a {type} datasource and it is not configured with a secret, cannot proceed".format(type=dsType, datasource_name=datasourceName, datasource_id=datasourceId)) + raise ValueError("Datasource {datasource_name} (datasource:{datasource_id}) is a {type} datasource and it is not configured with a secret, cannot proceed".format(type=dsType, datasource_name=datasourceName, datasource_id=datasourceId)) DSparameters['SecretArn'] = ret['DataSource']['SecretArn'] @@ -186,7 +197,7 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict, print("Append content is None") raise ValueError("Error in createTemplateFromAnalysis:generateDataSourceCFN, Append content is None") - with open('resources/datasource_CFN_skel.yaml', 'r') as file: + with open('resources/datasource_resource_CFN_skel.yaml', 'r') as file: yaml_datasource = yaml.safe_load(file) datasourceIdKey = datasourceDefObj.CFNId @@ -204,11 +215,11 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict, dsType = datasourceDefObj.type - print("Processing datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index}".format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType)) + print("Processing datasource {datasource_name} (datasource:{datasource_id}, type {type})".format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType)) if dsType == SourceType.S3: - destBucketKey = '{type}DestinationBucket{index}'.format(index=index, type=dsType.name) - destKeyKey = '{type}DestinationKey{index}'.format(index=index, type=dsType.name) + destBucketKey = '{cfnid}{type}DestinationBucket'.format(cfnid=datasourceIdKey, type=dsType.name) + destKeyKey = '{cfnid}{type}DestinationKey'.format(cfnid=datasourceIdKey, type=dsType.name) templateS3Parameters = { 'S3Parameters': { 'ManifestFileLocation': {} @@ -217,12 +228,16 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict, if remap: appendContent['Parameters'].update({ destBucketKey: { - 'Description' : 'S3 bucket to use for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType), - 'Type': 'String' + 'Description' : 'S3 bucket to use for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. 
This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['Bucket'] }, destKeyKey: { - 'Description' : 'S3 key to use for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType), - 'Type': 'String' + 'Description' : 'S3 key to use for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['Key'] } }) templateS3Parameters['S3Parameters']['ManifestFileLocation']['Bucket'] = { @@ -244,11 +259,15 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict, 'AthenaParameters': {} } if remap: - athenaWorkgroupKey = '{type}Workgroup{index}'.format(index=index, type=dsType.name) + athenaWorkgroupKey = '{cfnid}{type}Workgroup'.format(cfnid=datasourceIdKey, type=dsType.name) appendContent['Parameters'].update({ athenaWorkgroupKey: { - 'Description' : 'Athena Workgroup to use for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. 
This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name),
-                    'Type': 'String'
+                    'Description' : 'Athena Workgroup to use for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'
+                        .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name),
+                    'Type': 'String',
+                    'Default': datasourceDefObj.parameters['WorkGroup']
                 }
             })
             templateAthenaParameters['AthenaParameters']['WorkGroup'] = {
@@ -263,32 +282,31 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict,
         properties['Type'] = dsType.name
         properties['DataSourceParameters'] = templateAthenaParameters
 
-
-
-
-
     if dsType.name in RDMBS_DS:
+        dsSecretKey = '{cfnid}SecretArn'.format(cfnid=datasourceIdKey)
         properties['Credentials'] = {
             'SecretArn': {
-                'Ref': 'DSSecretArn'
+                'Ref': dsSecretKey
             }
         }
         appendContent['Parameters'].update({
-            'DSSecretArn': {
-                'Description' : 'Secret Arn to use in the stage, to be parametrized via CFN',
+            dsSecretKey: {
+                'Description' : 'Secret Arn to use for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN'
+                    .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name),
                 'Type': 'String'
             }
         })
 
     if datasourceDefObj.vpcConnectionArn != '':
+        vpcConnectionKey = '{cfnid}VpcConnectionArn'.format(cfnid=datasourceIdKey)
         properties['VpcConnectionProperties'] = {
             'VpcConnectionArn': {
-                'Ref': 'VpcConnectionArn'
+                'Ref': vpcConnectionKey
            }
        }
         appendContent['Parameters'].update({
-            'VpcConnectionArn': {
-                'Description' : 'VPC Connection Arn to use in the stage, to be parametrized via CFN',
+            vpcConnectionKey: {
+                'Description' : 'VPC Connection Arn to use for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN'
+                    .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name),
                 'Type': 'String'
             }
         }
@@ -296,8 +314,8 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict,
 
     if isinstance(datasourceDefObj, QSRDSDatasourceDef):
         #its an RDS datasource
-        rdsInstanceParam = 'RDSInstanceID{index}'.format(index=index)
-        databaseParam = 'RDSDBName{index}'.format(index=index)
+        rdsInstanceParam = '{cfnid}RDSInstanceID'.format(cfnid=datasourceIdKey)
+        databaseParam = '{cfnid}RDSDBName'.format(cfnid=datasourceIdKey)
         templateDSParameters = {
            'RdsParameters' : {
            }
        }
         if remap:
             appendContent['Parameters'].update({
                 rdsInstanceParam: {
-                    'Description' : 'RDS Instance Id for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. 
This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name), - 'Type': 'String' + 'Description' : 'RDS Instance Id for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['InstanceId'] }, databaseParam: { - 'Description' : 'Database name for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name), - 'Type': 'String' + 'Description' : 'Database name for datasource {datasource_name} (datasource:{datasource_id}, type {type}) in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['Database'] } }) templateDSParameters['RdsParameters']['InstanceId'] = { @@ -331,21 +353,27 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict, } } if remap: - databaseParam = '{type}DBName{index}'.format(index=index, type=dsType.name) - portParam = '{type}Port{index}'.format(index=index, type=dsType.name) - hostParam = '{type}Host{index}'.format(index=index,type=dsType.name) + databaseParam = '{cfnid}{type}DBName'.format(cfnid=datasourceIdKey, type=dsType.name) + portParam = '{cfnid}{type}Port'.format(cfnid=datasourceIdKey, type=dsType.name) + hostParam = '{cfnid}{type}Host'.format(cfnid=datasourceIdKey,type=dsType.name) appendContent['Parameters'].update({ databaseParam: { - 'Description' : 'Database name for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name), - 'Type': 'String' + 'Description' : 'Database name for datasource {datasource_name} (datasource:{datasource_id}, type {type}) to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. 
This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['Database'] }, portParam: { - 'Description' : 'Database port for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name), - 'Type': 'Number' + 'Description' : 'Database port for datasource {datasource_name} (datasource:{datasource_id}, type {type}) to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name), + 'Type': 'Number', + 'Default': datasourceDefObj.parameters['Port'] }, hostParam: { - 'Description' : 'Database host for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. This parameter was added because REMAP_DS parameter was set in the synthesizer lambda'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name), - 'Type': 'String' + 'Description' : 'Database host for datasource {datasource_name} (datasource:{datasource_id}, type {type}) to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline. 
This parameter was added because REMAP_DS parameter was set in the synthesizer lambda' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['Host'] } }) @@ -366,11 +394,13 @@ def generateDataSourceCFN(datasourceDefObj: QSDataSourceDef, appendContent:dict, if dsType == SourceType.REDSHIFT: if remap: - RSclusterIdParam = '{type}ClusterId{index}'.format(index=index,type=dsType.name) + RSclusterIdParam = '{cfnid}{type}ClusterId'.format(cfnid=datasourceIdKey,type=dsType.name) appendContent['Parameters'].update({ RSclusterIdParam: { - 'Description' : 'ClusterId for datasource {datasource_name} (ID {datasource_id}, type {type}) with index {index} to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline'.format(datasource_name=datasourceName, index=index, datasource_id=datasourceDefObj.id, type=dsType.name), - 'Type': 'String' + 'Description' : 'ClusterId for datasource {datasource_name} (datasource:{datasource_id}, type {type}) to use in the stage, to be parametrized via CFN deploy action in codepipeline see https://a.co/2aOOOTA for more information about how to set it in Codepipeline' + .format(datasource_name=datasourceName, datasource_id=datasourceDefObj.id, type=dsType.name), + 'Type': 'String', + 'Default': datasourceDefObj.parameters['ClusterId'] } }) templateDSParameters[datasourceParametersKey]['ClusterId'] = { @@ -413,7 +443,7 @@ def generateDataSetCFN(datasetObj: QSDataSetDef, datasourceObjs: QSDataSourceDef dependingResources = [] datasetId = datasetObj.id - ret = qs.describe_data_set(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=datasetId) + ret = qs.describe_data_set(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=datasetId) with open('resources/dataset_resource_CFN_skel.yaml', 'r') as file: yaml_dataset = yaml.safe_load(file) @@ -464,9 +494,13 @@ def generateDataSetCFN(datasetObj: QSDataSetDef, datasourceObjs: QSDataSourceDef if datasetObj.rlsDSetDef is not None: #This dataset contains a RLS dataset, so we need to update properties and dependencies in template accordingly rlsDSetId = datasetObj.rlsDSetDef['Arn'].split('dataset/')[-1] + rslDsetCFNId = 'DSet{id}'.format(id=rlsDSetId.replace('-', '')) dependingResources.append('DSet{id}'.format(id=rlsDSetId.replace('-', ''))) datasetObj.rlsDSetDef['Arn'] = { - 'Fn::Sub': 'arn:aws:quicksight:${AWS::Region}:${AWS::AccountId}:dataset/${datasetId}'.replace('${datasetId}', rlsDSetId) + 'Fn::GetAtt': [ + rslDsetCFNId, + 'Arn' + ] } appendContent['Resources'][dataSetIdKey]['Properties']['RowLevelPermissionDataSet'] = datasetObj.rlsDSetDef @@ -480,7 +514,7 @@ def generateRowLevelPermissionDataSetCFN( appendContent:dict, targetDatasetIdKey Args: appendContent (dict): Dictionary containing the definition of Cloudformation template elements targetDatasetIdKey (str): Dataset CFNId this RLS applies to - rlsDatasetDef (dict): Object defining the RLS dataset to be appplied to the target dataset + rlsDatasetDef (dict): Object defining the RLS dataset to be applied to the target dataset datasourceOrd(int): number of datasources that have been generated (used to build the parameters in cloudformation) lambdaEvent (dict): Lambda event that contains optional parameters that alter the CFN resource that will be created, for example the REMAP_DS that, if provided \ will generate a parametrized CFN template to replace datasource parameters @@ -493,11 
+527,11 @@ def generateRowLevelPermissionDataSetCFN( appendContent:dict, targetDatasetIdKey ret_refresh_schedules = [] rlsDatasetId = rlsDatasetDef['Arn'].split('dataset/')[-1] - retRLSDSet = qs.describe_data_set(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=rlsDatasetId) + retRLSDSet = qs.describe_data_set(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=rlsDatasetId) if retRLSDSet['DataSet']['ImportMode'] == ImportMode.SPICE.name: importMode = ImportMode.SPICE - ret_refresh_schedules = qs.list_refresh_schedules(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=rlsDatasetId) + ret_refresh_schedules = qs.list_refresh_schedules(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=rlsDatasetId) else: importMode = ImportMode.DIRECT_QUERY @@ -547,7 +581,7 @@ def generateAnalysisFromTemplateCFN(analysisObj: QSAnalysisDef, templateId:str, """ - analysis_tag = 'UPDATED_{suffix}'.format(suffix=now.strftime('%d-%m-%y-%H-%M-%S')) + analysis_tag = 'UPDATED_{suffix}'.format(suffix=utc_now.strftime('%d-%m-%y-%H-%M-%S')) with open('resources/analysis_resource_CFN_skel.yaml', 'r') as file: yaml_analysis = yaml.safe_load(file) @@ -567,7 +601,7 @@ def generateAnalysisFromTemplateCFN(analysisObj: QSAnalysisDef, templateId:str, ] sourceTemplateArnJoinObj = { - 'Fn::Sub': 'arn:aws:quicksight:${AWS::Region}:${SourceAccountID}:template/{template_id}'.replace('{template_id}', templateId) + 'Fn::Sub': 'arn:aws:quicksight:${SrcQSRegion}:${SourceAccountID}:template/{template_id}'.replace('{template_id}', templateId) } properties['SourceEntity']['SourceTemplate']['Arn'] = sourceTemplateArnJoinObj @@ -617,12 +651,11 @@ def generateRefreshSchedulesCFN(datasetObj: QSDataSetDef, appendContent: dict): DSETIdSanitized = datasetObj.id.replace('-', '') - ret = qs.list_refresh_schedules(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=datasetObj.id) - now = datetime.now() + ret = qs.list_refresh_schedules(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=datasetObj.id) for schedule in ret['RefreshSchedules']: refresh_schedule_id = schedule['ScheduleId'] - retSchedule = qs.describe_refresh_schedule(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=datasetObj.id, ScheduleId=refresh_schedule_id) + retSchedule = qs.describe_refresh_schedule(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=datasetObj.id, ScheduleId=refresh_schedule_id) with open('resources/dataset_refresh_schedule_CFN_skel.yaml', 'r') as file: yaml_schedule = yaml.safe_load(file) @@ -632,11 +665,11 @@ def generateRefreshSchedulesCFN(datasetObj: QSDataSetDef, appendContent: dict): yaml_schedule['Properties']['Schedule']['ScheduleFrequency']['TimeZone'] = yaml_schedule['Properties']['Schedule']['ScheduleFrequency'].pop('Timezone') scheduleFrequency = retSchedule['RefreshSchedule']['ScheduleFrequency']['Interval'] if scheduleFrequency == 'MONTHLY': - futurestartAfterTimeTz = now + relativedelta(months=+1) + futurestartAfterTimeTz = utc_now + relativedelta(months=+1) elif scheduleFrequency == 'WEEKLY': - futurestartAfterTimeTz = now + relativedelta(weeks=+1) + futurestartAfterTimeTz = utc_now + relativedelta(weeks=+1) else: - futurestartAfterTimeTz = now + relativedelta(days=+7) + futurestartAfterTimeTz = utc_now + relativedelta(days=+7) # Remove timezone info as it is included separately in the object yaml_schedule['Properties']['Schedule']['StartAfterDateTime'] = futurestartAfterTimeTz.strftime('%Y-%m-%dT%H:%M:%SZ') scheduleCFNId = 'RSchedule{id}'.format(id=refresh_schedule_id.replace('-', '')) @@ -737,6 +770,47 @@ def uploadFileToS3(bucket: str, filename: str, region: str, 
bucket_owner:str, pr
         return False
     return True
 
+#helper function to generate a presigned url in S3 for a given bucket and key
+def generatePresignedUrl(bucket: str, key:str, region: str, credentials=None):
+
+    """
+    Helper function that generates a presigned url in S3 for a given bucket and key
+
+    Parameters:
+
+    bucket(str): S3 bucket that contains the object
+    key(str): S3 key of the object
+    region(str): AWS region where the bucket is located
+    credentials(dict): AWS credentials to be used in the operation
+
+    Returns:
+
+    str: Presigned url
+
+    Examples:
+
+    >>> generatePresignedUrl(bucket=bucket, key=key, region=region, credentials=credentials)
+
+    """
+    if credentials is None:
+        s3 = boto3.client('s3', region_name=region)
+    else:
+        s3 = boto3.client('s3', region_name=region, aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
+
+    # Generate a presigned URL for an S3 object
+    expires_in_seconds = 3600
+
+    presigned_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': bucket,
+            'Key': key
+        },
+        ExpiresIn=expires_in_seconds
+    )
+
+    return presigned_url
+
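A quick, hypothetical usage of the new `generatePresignedUrl` helper (bucket, key and region are placeholders):

```python
# Illustrative only: the returned URL grants time-limited GET access to the
# object, so a generated template stored in S3 can be fetched without any
# extra IAM setup on the caller's side.
url = generatePresignedUrl(
    bucket='my-deployment-bucket',
    key='QSCICDPipeline/CFNTemplates/QS_assets_CFN_SOURCE.yaml',
    region='us-east-1',
)
print(url)  # expires after 3600 seconds
```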
 
 def get_s3_objects(bucket: str, prefix: str, region: str, credentials=None):
@@ -779,6 +853,168 @@ def get_s3_objects(bucket: str, prefix: str, region: str, credentials=None):
 
     return downloaded_files
 
+## Helper function that stores a dashboard parameter definition in JSON into a given DynamoDB table
+def store_dashboard_parameter_definition_in_dynamo(parameter_definition: dict, table_name: str, assetType:str, stage:str, region:str, parameter_help:dict, credentials=None):
+    """
+    Helper function that stores a dashboard parameter definition in JSON into a given DynamoDB table
+
+    Parameters:
+
+    parameter_definition(dict): Dashboard parameter definition in JSON
+    parameter_help(dict): Parameter dictionary that contains a list of default values, description and type information for each parameter
+    table_name(str): Name of the DynamoDB table where the parameter definition will be stored
+    credentials(dict): AWS credentials to be used in the operation
+    assetType(str): Type of the asset (valid values are dest or source)
+    stage(str): Stage of the dashboard (e.g. dev, prod)
+    region(str): AWS region where the table is located
+
+    Returns:
+
+    True if the parameter definition was stored successfully, False otherwise
+
+    Examples:
+
+    >>> store_dashboard_parameter_definition_in_dynamo(parameter_definition=parameter_definition, parameter_help=parameter_help, table_name=table_name, assetType=assetType, stage=stage, region=region)
+
+    """
+
+    if assetType not in ['dest', 'source']:
+        raise ValueError('Invalid asset type {assetType}, should be either dest or source'.format(assetType=assetType))
+
+    if credentials is None:
+        dynamodb = boto3.resource('dynamodb', region_name=region)
+    else:
+        dynamodb = boto3.resource('dynamodb', region_name=region, aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
+
+    table = dynamodb.Table(table_name)
+
+    try:
+        table.put_item(
+            Item= {
+                'AssetType': assetType,
+                'StageName': stage,
+                'ParameterDefinition': parameter_definition,
+                'ParameterDefinitionHelp': parameter_help
+            }
+        )
+    except ClientError as e:
+        logging.error(e)
+        return False
+    return True
+
+def read_dashboard_parameter_definition_from_dynamo(table_name: str, assetType:str, stage:str, region:str, credentials=None):
+    """
+    Helper function that reads the QuickSight asset parameter definition from a given DynamoDB table using sts provided credentials
+
+    Parameters:
+
+    table_name(str): Name of the DynamoDB table where the parameter definition is stored
+    credentials(dict): AWS credentials to be used in the operation
+    assetType(str): Type of the asset (valid values are dest or source)
+    stage(str): Stage of the dashboard (e.g. dev, prod)
+    region(str): AWS region where the table is located
+
+    Returns:
+
+    parameter_definition(dict): Dashboard parameter definition in JSON
+
+    Examples:
+
+    >>> read_dashboard_parameter_definition_from_dynamo(table_name=table_name, assetType=assetType, stage=stage, region=region)
+
+    """
+
+    if credentials is None:
+        dynamodb = boto3.resource('dynamodb', region_name=region)
+    else:
+        dynamodb = boto3.resource('dynamodb', region_name=region, aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
+
+    table = dynamodb.Table(table_name)
+
+    try:
+        response = table.get_item(
+            Key={
+                'AssetType': assetType,
+                'StageName': stage
+            }
+        )
+        if 'Item' not in response:
+            raise ValueError('No configuration item found in the response when querying {table} parameter table for stage {stage} and assetType {assetType}'.format(table=table_name, stage=stage, assetType=assetType))
+        parameter_definition = json.loads(response['Item']['ParameterDefinition'])
+    except ClientError as e:
+        logging.error(e)
+        return {}
+
+    return parameter_definition
+
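For reviewers wiring up the new DynamoDB-based parameter flow, this is the record shape implied by the two helpers above; all values are placeholders, and since the read helper parses `ParameterDefinition` with `json.loads`, the attribute is expected to hold a JSON string:

```python
# Illustrative only: shape of a QSAssetParameters-<PipelineName> record.
import json

item = {
    'StageName': 'PRE',                  # partition key: one record per stage...
    'AssetType': 'dest',                 # ...and asset type (sort key: source/dest)
    # Stored as a JSON string so it can be hand-edited in the DynamoDB console
    'ParameterDefinition': json.dumps([
        {'ParameterKey': 'QSUser', 'ParameterValue': 'my-qs-user'},
    ]),
    'ParameterDefinitionHelp': '...',    # per-parameter descriptions/defaults
}
```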
+def read_all_assetIds_from_dynamo(region:str, credentials=None, table_name=TRACKED_ASSETS_TABLE_NAME):
+    """
+    Helper function that reads all the assetIds defined in a given DynamoDB table and returns the set of items
+
+    Parameters:
+
+    region(str): The AWS region where the table is located
+    credentials(dict): AWS credentials to be used in the operation
+    table_name(str): Name of the DynamoDB table where the tracked assets are stored, defaults to TRACKED_ASSETS_TABLE_NAME
+
+    Returns:
+
+    assetIds(set): Set of assetIds
+
+    Examples:
+
+    >>> read_all_assetIds_from_dynamo(region=region, credentials=credentials, table_name=table_name)
+
+    """
+
+    if credentials is None:
+        dynamodb = boto3.resource('dynamodb', region_name=region)
+    else:
+        dynamodb = boto3.resource('dynamodb', region_name=region, aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
+
+    table = dynamodb.Table(table_name)
+
+    assetIds = []
+    try:
+        response = table.scan()
+        assetIds = [item['AssetId'] for item in response['Items']]
+    except ClientError as e:
+        logging.error(e)
+
+    return set(assetIds)
+
+# helper function to validate if a given asset Id is a QuickSight dashboard
+def validate_asset_id(assetId:str, region:str, credentials=None):
+    """
+    Helper function that validates if a given asset Id is a QuickSight dashboard
+
+    Parameters:
+
+    assetId(str): Asset ID of the dashboard
+    region(str): The AWS region where QuickSight is hosted
+    credentials(dict): AWS credentials to be used in the operation
+
+    Returns:
+
+    True if the assetId is a QuickSight dashboard, False otherwise
+
+    Examples:
+
+    >>> validate_asset_id(assetId=assetId, region=region, credentials=credentials)
+
+    """
+
+    quicksight = boto3.client('quicksight', region_name=region)
+
+    try:
+        quicksight.describe_dashboard(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DashboardId=assetId)
+    except quicksight.exceptions.ResourceNotFoundException:
+        print('The assetId {assetId} configured in the source DDB parameter table is not a QuickSight dashboard or the IAM role used by the function does not have access to it, please fix and retry.'.format(assetId=assetId))
+        print('At the moment only dashboard objects are supported in the code for this Guidance')
+        return False
+
+    return True
+
 def writeToFile(filename: str, content: object, format="yaml"):
     """
     Helper function that writes the contents of the object to a file
@@ -798,7 +1034,7 @@ def writeToFile(filename: str, content: object, format="yaml"):
 
     """
 
-    with open(filename, '+w') as file:
+    with open(filename, 'w+') as file:
         if format == 'yaml':
             yaml.dump(content, file)
         elif format == 'json':
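The tracked-assets helpers above are presumably combined in the lambda handler along these lines; the event field and function below are illustrative only, not the actual handler code:

```python
# Hypothetical gating logic: only synthesize templates for dashboards that
# are registered in the QSTrackedAssets table and actually exist in QuickSight.
def handler_gate(event):
    dashboard_id = event['detail']['dashboardId']          # assumed event field
    tracked_ids = read_all_assetIds_from_dynamo(region=AWS_REGION)
    if dashboard_id not in tracked_ids:
        print('Dashboard {id} is not tracked, nothing to do'.format(id=dashboard_id))
        return False
    # Only dashboard assets are supported, so double-check before synthesizing
    return validate_asset_id(assetId=dashboard_id, region=AWS_REGION)
```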
@@ -852,7 +1088,7 @@ def summarize_template(template_content: dict, templateName: str, s3Credentials:
 
     Returns:
 
-    bool: True if execution was successful
+    parameters_info(dict): Dictionary with the parameters and their description
 
     Examples:
 
@@ -864,31 +1100,38 @@ def summarize_template(template_content: dict, templateName: str, s3Credentials:
         print(DIVIDER_SECTION)
         print("Template {template_name} doesn't contain any parameters, nothing to do in this file".format(template_name=templateName))
         print(DIVIDER_SECTION)
-        return True
+        return {}
 
-    parameters = template_content['Parameters']
+    parameters_info = template_content['Parameters']
 
     paramFilename = '{output_dir}/{template_name}_README.json'.format(output_dir=OUTPUT_DIR, template_name=templateName)
-    writeToFile(content=parameters, filename=paramFilename, format="json")
-    uploadFileToS3(bucket=DEPLOYMENT_S3_BUCKET, filename=paramFilename, region=AWS_REGION, object_name='{template_name}_README.json'.format(template_name=templateName), prefix=conf_files_prefix, bucket_owner=DEPLOYMENT_ACCOUNT_ID, credentials=s3Credentials)
+    writeToFile(content=parameters_info, filename=paramFilename, format="json")
+    uploadFileToS3(bucket=DEPLOYMENT_S3_BUCKET, filename=paramFilename, region=AWS_REGION, object_name='{template_name}_README.json'.format(template_name=templateName),
+                   prefix=conf_files_prefix, bucket_owner=DEPLOYMENT_ACCOUNT_ID, credentials=s3Credentials)
 
     print(DIVIDER_SECTION)
-    print("Template {template_name} contains parameters that need to be set in CodePipeline's CloudFormation artifact via file. This file has been uploaded to {file_location}, each development stage will have its own pair of parametrization files (source and dest).\
-    You will need to download this file, edit the parameters according to your environments ({environments}) and then execute this function with \"MODE\" : \"DEPLOY\" key present in the lambda event. Refer to https://a.co/0DrKhVm for more \
-    information on how to use this file".format(template_name=templateName, file_location=conf_files_prefix, environments=parameters.keys()))
-    print("Find below a lisf of the parameters needed, also README.json files for each stack (source and dest) have been created on the aforementioned S3 bucket and prefix:")
+    print("Template {template_name} contains parameters that need to be set in CodePipeline's CloudFormation artifact. These can be configured in the DynamoDB table {ddb_param_table} in your DEPLOYMENT "
+          "account ({deployment_account_id}); each deployment stage has two records in this table with a different AssetType attribute value (source and dest). "
+          "You will need to open this table in the DDB console, edit the records for each of the environments ({environments}) filling the ParameterDefinition attribute as "
+          "needed, and then execute this function with the \"MODE\" : \"DEPLOY\" key present in the lambda event. Refer to https://a.co/0DrKhVm for more information on how to configure these parameters"
+          .format(template_name=templateName, ddb_param_table=PARAMETER_DEFINITION_TABLE_NAME, deployment_account_id=DEPLOYMENT_ACCOUNT_ID, environments=parameters_info.keys()))
+    print("You can access the {ddb_param_table} table directly on the console using this link in your DEPLOYMENT_ACCOUNT ({deployment_account_id}): "
+          "https://{region}.console.aws.amazon.com/dynamodbv2/home?region={region}#item-explorer?fromTables=true&maximize=true&table={ddb_param_table}"
+          .format(region=AWS_REGION, ddb_param_table=PARAMETER_DEFINITION_TABLE_NAME, deployment_account_id=DEPLOYMENT_ACCOUNT_ID))
+    print("Find below a list of the parameters needed for each stack (source and dest); this information is also available under the ParameterDefinitionHelp attribute of each DynamoDB record in the {ddb_param_table} table"
+          .format(ddb_param_table=PARAMETER_DEFINITION_TABLE_NAME))
     print("")
 
-    for parameter in parameters.keys():
-        print("{parameter}: {description}".format(parameter=parameter, description=parameters[parameter]['Description']))
+    for parameter in parameters_info.keys():
+        print("{parameter}: {description}".format(parameter=parameter, description=parameters_info[parameter]['Description']))
 
-    param_formatted = [ 'ParameterKey={parameter},ParameterValue='.format(parameter=parameter) for parameter in parameters.keys() ]
+    param_formatted = [ 'ParameterKey={parameter},ParameterValue='.format(parameter=parameter) for parameter in parameters_info.keys() ]
 
     print("")
     print("Remember that you can still define parameter override values in CodePipeline deploy actions following this format, however for scalability configuration files are recommended to be used instead:")
     print('\n'.join(param_formatted))
     print(DIVIDER_SECTION)
 
-    return True
+    return parameters_info
 
 def generate_cloudformation_template_parameters(template_content: dict):
     """
@@ -929,35 +1172,66 @@ def generate_cloudformation_template_parameters(template_content: dict):
 
     return parameter_list
 
-def check_parameters_cloudformation(param_list, key, region, credentials):
+def check_parameters_cloudformation(template_param_list, region, credentials, assetType):
+    """
+    Helper function that checks if the parameters defined in the template are the same as the ones defined in the DynamoDB parameter definition table
+
+    Parameters:
+
+    template_param_list(list): List with parameter objects to be filled up by the user
+    region(str): The AWS region where the table is located
+    credentials(dict): AWS credentials to be used in the operation
+    assetType(str): Type of asset, either 'dest' or 'source'
+
+    Returns:
+
+    None
+
+    Examples:
+
+    >>> check_parameters_cloudformation(template_param_list=template_param_list, region=region, credentials=credentials, assetType=assetType)
+
+    """
 
-    s3 = boto3.client('s3', region_name=region, aws_access_key_id=credentials['AccessKeyId'], aws_secret_access_key=credentials['SecretAccessKey'], aws_session_token=credentials['SessionToken'])
-
-    print('Checking CFN parameters, S3 object {key} in Bucket {bucket}...'.format(key=key, bucket=DEPLOYMENT_S3_BUCKET))
-    ret = s3.get_object(Bucket=DEPLOYMENT_S3_BUCKET, Key=key)
-
-    param_object = json.loads(ret['Body'].read())
-
-    param_object_keys = []
-    param_list_keys = []
-
-    for parameter in param_object:
-        param_object_keys.append(parameter['ParameterKey'])
-
-    for parameter in param_list:
-        param_list_keys.append(parameter['ParameterKey'])
-
-    if set(param_object_keys) != set(param_list_keys):
-        if set(param_object_keys) > set(param_list_keys):
-            parameters_in_error = set(param_object_keys) - set(param_list_keys)
-            error = 'Not all the parameters configured in your CFN parameter S3 configuration file {key} in Bucket {bucket} are needed in CFN... Extra parameters in S3 are {parameters_in_error}. Please, correct file and try again, consider running MODE: ''INITIALIZE'' to fix this.'.format(key=key, bucket=DEPLOYMENT_S3_BUCKET, parameters_in_error=parameters_in_error)
+    if assetType not in ['dest', 'source']:
+        raise ValueError('Invalid asset type {assetType}, should be either dest or source'.format(assetType=assetType))
+
+    deployment_stages = STAGES_NAMES.split(",")[1:]
+
+    for stage in deployment_stages:
+        stage = stage.strip()
+        print('Checking {asset_type} parameters for stage {stage}'.format(stage=stage, asset_type=assetType))
+        key = '{prefix}/{asset_type}_cfn_template_parameters_{stage}.txt'.format(prefix=CONFIGURATION_FILES_PREFIX, asset_type=assetType, stage=stage)
+        file_param_obj = read_dashboard_parameter_definition_from_dynamo(table_name=PARAMETER_DEFINITION_TABLE_NAME, assetType=assetType, stage=stage, region=region, credentials=credentials)
+
+        file_param_object_keys = []
+        template_param_object_keys = []
+
+        for parameter in file_param_obj:
+            file_param_object_keys.append(parameter['ParameterKey'])
+
+        for parameter in template_param_list:
+            template_param_object_keys.append(parameter['ParameterKey'])
+
+        if set(file_param_object_keys) != set(template_param_object_keys):
+            if set(file_param_object_keys) > set(template_param_object_keys):
+                parameters_in_error = set(file_param_object_keys) - set(template_param_object_keys)
+                error = ('Not all the parameters configured in your DynamoDB parameter definition table {table} for stage {stage} and assetType {asset_type} are needed in CFN... '
+                         'Extra parameters in the DynamoDB table are {parameters_in_error}. Please correct the record and try again, consider running MODE: INITIALIZE to fix this.'
                         .format(table=PARAMETER_DEFINITION_TABLE_NAME, stage=stage, asset_type=assetType, parameters_in_error=parameters_in_error))
+                .format(table=PARAMETER_DEFINITION_TABLE_NAME, stage=stage, asset_type=assetType, parameters_in_error=parameters_in_error)
+            else:
+                parameters_in_error = set(template_param_object_keys) - set(file_param_object_keys)
+                error = 'Not all the needed CFN parameters were found in your DynamoDB parameter definition table {table} for stage {stage} and assetType {asset_type}...\
+                Missing parameters in DynamoDB table are {parameters_in_error}. Please add them and try again'.format(table=PARAMETER_DEFINITION_TABLE_NAME, stage=stage, asset_type=assetType,
+                parameters_in_error=parameters_in_error)
+            print(error)
+            raise ValueError(error)
         else:
-            parameters_in_error = set(param_list_keys) - set(param_object_keys)
-            error = 'Not all the needed CFN parameters were found in environment configuration file {key} in Bucket {bucket}... Missing parameters in S3 are {parameters_in_error}. Please, add them and try again'.format(key=key, bucket=DEPLOYMENT_S3_BUCKET, parameters_in_error=parameters_in_error)
-        print(error)
-        raise ValueError(error)
-    else:
-        print('All the needed CFN parameters were found in environment configuration file {key} in Bucket {bucket}...'.format(key=key, bucket=DEPLOYMENT_S3_BUCKET))
+            print('All the needed CFN parameters were found in the DynamoDB parameter definition table {table} for stage {stage} and assetType {asset_type}'
+                  .format(table=PARAMETER_DEFINITION_TABLE_NAME, stage=stage, asset_type=assetType))
+
+        param_file_path = writeToFile('{output_dir}/{asset_type}_cfn_template_parameters_{stage}.txt'.format(output_dir=OUTPUT_DIR, asset_type=assetType, stage=stage.strip()), content=file_param_obj, format='json')
+        uploadFileToS3(bucket=DEPLOYMENT_S3_BUCKET, filename=param_file_path, region=region, object_name=os.path.basename(key), prefix=CONFIGURATION_FILES_PREFIX, bucket_owner=DEPLOYMENT_ACCOUNT_ID, credentials=credentials)
 
     return
 
@@ -1035,7 +1309,7 @@ def add_permissions_to_AAB_resources(template_content:dict):
 
     # Get the list of AAB resources
     aab_resources = template_content['Resources']
 
-    with open('resources/datasource_CFN_skel.yaml', 'r') as file:
+    with open('resources/datasource_resource_CFN_skel.yaml', 'r') as file:
         yaml_datasource = yaml.safe_load(file)
         datasource_permissions_obj = yaml_datasource['Properties']['Permissions']
@@ -1047,12 +1321,16 @@ def add_permissions_to_AAB_resources(template_content:dict):
         yaml_analysis = yaml.safe_load(file)
         analysis_permissions_obj = yaml_analysis['Properties']['Permissions']
 
+    with open('resources/theme_resource_CFN_skel.yaml', 'r') as file:
+        yaml_theme = yaml.safe_load(file)
+        theme_permissions_obj = yaml_theme['Properties']['Permissions']
+
     updated = False
 
     for resourceId in aab_resources.keys():
         resource = aab_resources[resourceId]
         resourceType = resource['Type']
-        if resourceType in ['AWS::QuickSight::Analysis', 'AWS::QuickSight::DataSet', 'AWS::QuickSight::DataSource']:
+        if resourceType in ['AWS::QuickSight::Analysis', 'AWS::QuickSight::DataSet', 'AWS::QuickSight::DataSource', 'AWS::QuickSight::Theme']:
             updated = True
             if resource['Type'] == 'AWS::QuickSight::Analysis':
                 resource['Properties']['Permissions'] = copy.deepcopy(analysis_permissions_obj)
@@ -1060,6 +1338,8 @@
                 resource['Properties']['Permissions'] = copy.deepcopy(dataset_permissions_obj)
             elif resource['Type'] == 'AWS::QuickSight::DataSource':
                 resource['Properties']['Permissions'] = copy.deepcopy(datasource_permissions_obj)
+            elif resource['Type'] == 'AWS::QuickSight::Theme':
+                resource['Properties']['Permissions'] = copy.deepcopy(theme_permissions_obj)
 
     if updated:
         template_content['Parameters']['QSUser'] = {
@@ -1108,14 +1388,14 @@ def generate_template_outputs(analysis_obj:QSAnalysisDef, source_template_conten
 
     return source_template_content, dest_template_content
 
-def generate_cloud_formation_override_list_AAB(analysisObj:QSAnalysisDef):
+def generate_cloud_formation_override_list_AAB(analysisObjList:list):
     """
     Helper function that generates the CloudFormation template override object to be used in the start_asset_bundle_export_job API call
 
     Parameters:
 
-    analysisObj(QSAnalysisDef): Analysis object as returned by describe_analysis QS API method
+    analysisObjList(List[QSAnalysisDef]): List of Analysis objects as returned by describe_analysis QS API method
 
     Returns:
 
@@ -1133,42 +1413,43 @@ def generate_cloud_formation_override_list_AAB(analysisObj:QSAnalysisDef):
 
     datasource_arns = []
 
     DataSourceOverridePropertiesList = []
-    
-    for dataset in analysisObj.datasets:
-        if dataset.refreshSchedules != []:
-            for schedule in dataset.refreshSchedules:
-                refresh_schedules_arns.append(schedule['Arn'])
-                RefreshScheduleOverridePropertyObj = {
-                    'Arn': schedule['Arn'],
-                    'Properties': ['StartAfterDateTime']
+    for analysisObj in analysisObjList:
+        for dataset in analysisObj.datasets:
+            if dataset.refreshSchedules != []:
+                for schedule in dataset.refreshSchedules:
+                    refresh_schedules_arns.append(schedule['Arn'])
+                    RefreshScheduleOverridePropertyObj = {
+                        'Arn': schedule['Arn'],
+                        'Properties': ['StartAfterDateTime']
+                    }
+                    RefreshScheduleOverridePropertiesList.append(RefreshScheduleOverridePropertyObj)
+
+            for datasource in dataset.dependingDSources:
+                if datasource.arn not in datasource_arns:
+                    datasource_arns.append(datasource.arn)
+                else:
+                    print('generate_cloud_formation_override_list_AAB: Skipping datasource {datasource_name} as it has been already processed'.format(datasource_name=datasource.name))
+                    continue
+                if (isinstance(datasource, QSRDSDatasourceDef) or isinstance(datasource, QSRDBMSDatasourceDef)) and datasource.vpcConnectionArn != '':
+                    vpc_conn_arns.append(datasource.vpcConnectionArn)
+                properties = []
+                # TODO add support for other datasource types
+                if isinstance(datasource, QSRDSDatasourceDef):
+                    properties = ['SecretArn','Username','Password','InstanceId', 'Database']
+                if isinstance(datasource, QSRDBMSDatasourceDef):
+                    properties = ['SecretArn','Username','Password','Host', 'Database']
+                    if datasource.type == SourceType.REDSHIFT:
+                        properties.append('ClusterId')
+                if isinstance(datasource, QSServiceDatasourceDef):
+                    if datasource.type == SourceType.S3:
+                        properties = ['ManifestFileLocation']
+                    if datasource.type == SourceType.ATHENA:
+                        properties = ['WorkGroup']
+                DataSourceOverridePropertyObj = {
+                    'Arn': datasource.arn,
+                    'Properties': properties
                 }
-                RefreshScheduleOverridePropertiesList.append(RefreshScheduleOverridePropertyObj)
-        
-        for datasource in dataset.dependingDSources:
-            if datasource.arn not in datasource_arns:
-                datasource_arns.append(datasource.arn)
-            else:
-                print('generate_cloud_formation_override_list_AAB: Skipping datasource {datasource_name} as it has been already processed'.format(datasource_name=datasource.name))
-                continue
-            if (isinstance(datasource, QSRDSDatasourceDef) or isinstance(datasource, QSRDBMSDatasourceDef)) and datasource.vpcConnectionArn != '':
-                vpc_conn_arns.append(datasource.vpcConnectionArn)
-            properties = []
-            if isinstance(datasource, QSRDSDatasourceDef):
-                properties = ['SecretArn','Username','Password','InstanceId', 'Database']
-            if isinstance(datasource, QSRDBMSDatasourceDef):
-                properties = ['SecretArn','Username','Password','Host', 'Database']
-                if datasource.type == SourceType.REDSHIFT:
-                    properties.append('ClusterId')
-            if isinstance(datasource, QSServiceDatasourceDef):
-                if datasource.type == SourceType.S3:
-                    properties = ['ManifestFileLocation']
-                if datasource.type == SourceType.ATHENA:
-                    properties = ['WorkGroup']
-            DataSourceOverridePropertyObj = {
-                'Arn': datasource.arn,
-                'Properties': properties
-            }
-            DataSourceOverridePropertiesList.append(DataSourceOverridePropertyObj)
+                DataSourceOverridePropertiesList.append(DataSourceOverridePropertyObj)
 
     #remove any duplicates
     vpc_conn_arns = list(set(vpc_conn_arns))
@@ -1176,7 +1457,7 @@
     for vpc_conn_arn in vpc_conn_arns:
         VPCConnectionOverridePropertyObj = {
             'Arn': vpc_conn_arn,
-            'Properties': ['Name','DnsResolvers','RoleArn',]
+            'Properties': ['Name','DnsResolvers','RoleArn']
         }
         VPCConnectionOverridePropertiesList.append(VPCConnectionOverridePropertyObj)
 
@@ -1186,7 +1467,7 @@
         },
         'VPCConnections': VPCConnectionOverridePropertiesList,
         'RefreshSchedules': RefreshScheduleOverridePropertiesList,
-        'DataSources': DataSourceOverridePropertiesList
+        'DataSources': DataSourceOverridePropertiesList
     }
 
     if len(vpc_conn_arns) == 0:
@@ -1197,17 +1478,18 @@
 
     return CloudFormationOverridePropertyConfiguration
 
-def replicate_dashboard_via_template(analysisObj:QSAnalysisDef, remap):
+def replicate_dashboard_via_template(analysisObjList:list, remap):
     """
     Helper function that replicates a QuickSight dashboard using a template and also creates assets for all the depending assets (datasets, datasources and secrets)
 
     Parameters:
 
-    analysisObj(QSAnalysisDef): Analysis object as returned by describe_analysis QS API method
+    analysisObjList(List[QSAnalysisDef]): List of Analysis objects
     remap(Boolean): Whether or not the datasource definitions should be remapped (always True for AAB)
 
     Returns:
 
-    None
+    source_account_yaml, dest_account_yaml YAML objects representing the generated templates (source and destination)
 
     Examples:
 
     """
 
     dest_account_yaml = {}
+    source_account_yaml = {}
 
     with open('resources/dest_CFN_skel.yaml', 'r') as file:
-        dest_account_yaml = yaml.safe_load(file)
-
-    dest_account_yaml['Resources'] = {}
-
-    datasets = analysisObj.datasets
-
-    for datasetDefObj in datasets:
+        dest_account_yaml = yaml.safe_load(file)
+    with open('resources/source_CFN_skel.yaml', 'r') as file:
+        source_account_yaml = yaml.safe_load(file)
+
+    dest_account_yaml['Resources'] = {}
+    source_account_yaml['Resources'] = {}
+    analysisIndex = 0
+    for analysisObj in analysisObjList:
+        print("Item {index}/{total}: Replicating dashboard {dashboard_id} from analysis {analysis_id} ..."
+              .format(index=analysisIndex+1, total=len(analysisObjList), dashboard_id=analysisObj.AssociatedDashboardId, analysis_id=analysisObj.id))
+
+        datasets = analysisObj.datasets
+        # TODO: Check logic
+        for datasetDefObj in datasets:
+
+            for datasourceDefObj in datasetDefObj.dependingDSources:
+                try:
+                    dest_account_yaml = generateDataSourceCFN(datasourceDefObj=datasourceDefObj, appendContent=dest_account_yaml, remap=remap)
+                except ValueError as error:
+                    print(error)
+                    print('There was an issue creating the following datasource: {datasourceId} cannot proceed further'.format(datasourceId=datasourceDefObj.id))
+                    return {
+                        'statusCode': 500
+                    }
+
+        source_account_yaml = generateQSTemplateCFN(analysisDefObj=analysisObj, appendContent=source_account_yaml)
 
-        for datasourceDefObj in datasetDefObj.dependingDSources:
-            try:
-                dest_account_yaml = generateDataSourceCFN(datasourceDefObj=datasourceDefObj, appendContent=dest_account_yaml, remap=remap)
-            except ValueError as error:
-                print(error)
-                print('There was an issue creating the following datasource: {datasourceId} cannot proceed further'.format(datasourceId=datasourceDefObj.id))
-                return {
-                    'statusCode': 500
-                }
-
-    source_account_yaml, SOURCE_TEMPLATE_ID = generateQSTemplateCFN(analysisDefObj=analysisObj)
-
-    for datasetObj in analysisObj.datasets:
-        dest_account_yaml = generateDataSetCFN(datasetObj=datasetObj, datasourceObjs=datasetObj.dependingDSources, tableMap=datasetObj.physicalTableMap, appendContent=dest_account_yaml)
+        for datasetObj in analysisObj.datasets:
+            dest_account_yaml = generateDataSetCFN(datasetObj=datasetObj, datasourceObjs=datasetObj.dependingDSources, tableMap=datasetObj.physicalTableMap, appendContent=dest_account_yaml)
 
-    dest_account_yaml = generateAnalysisFromTemplateCFN(analysisObj=analysisObj, templateId=SOURCE_TEMPLATE_ID, appendContent=dest_account_yaml)
+        dest_account_yaml = generateAnalysisFromTemplateCFN(analysisObj=analysisObj, templateId=analysisObj.TemplateId, appendContent=dest_account_yaml)
 
-    source_account_yaml, dest_account_yaml = generate_template_outputs(analysis_obj=analysisObj, source_template_content=source_account_yaml, dest_template_content=dest_account_yaml)
+        source_account_yaml, dest_account_yaml = generate_template_outputs(analysis_obj=analysisObj, source_template_content=source_account_yaml, dest_template_content=dest_account_yaml)
+        analysisIndex = analysisIndex + 1
 
     return source_account_yaml, dest_account_yaml
 
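For reference, the override object returned by generate_cloud_formation_override_list_AAB above takes roughly this shape (the ARNs are illustrative; the leading ResourceIdOverrideConfiguration key is elided by the hunk above, and its exact value should be checked against the QuickSight asset-bundle API):

    CloudFormationOverridePropertyConfiguration = {
        'ResourceIdOverrideConfiguration': {'PrefixForAllResources': False},  # assumed default
        'VPCConnections': [{'Arn': 'arn:aws:quicksight:us-east-1:111122223333:vpcConnection/example-vpc-conn',
                            'Properties': ['Name', 'DnsResolvers', 'RoleArn']}],
        'RefreshSchedules': [{'Arn': 'arn:aws:quicksight:us-east-1:111122223333:dataset/example-ds/refresh-schedule/example-schedule',
                              'Properties': ['StartAfterDateTime']}],
        'DataSources': [{'Arn': 'arn:aws:quicksight:us-east-1:111122223333:datasource/example-datasource',
                         'Properties': ['SecretArn', 'Username', 'Password', 'Host', 'Database']}]
    }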
-def replicate_dashboard_via_AAB(analysisObj:QSAnalysisDef, remap):
+def replicate_dashboard_via_AAB(analysisObjList:list, remap):
     """
     Helper function that replicates a QuickSight dashboard using assets as bundle and outputs results in CLOUDFORMATION_JSON
 
     Parameters:
 
-    analysisObj(QSAnalysisDef): Analysis object as returned by describe_analysis QS API method
+    analysisObjList(List[QSAnalysisDef]): List of Analysis objects
     remap(Boolean): Whether or not the datasource definitions and other properties should be remapped (more info here https://a.co/g1Tf0fp)
 
     Returns:
 
-    None
+    source_account_yaml, dest_account_yaml YAML objects representing the generated templates (source and destination)
 
     Examples:
 
@@ -1273,14 +1564,14 @@ def replicate_dashboard_via_AAB(analysisObj:QSAnalysisDef, remap):
 
     EXPORT_TERMINAL_STATUSES = ['SUCCESSFUL', 'FAILED']
     initial_wait_time_sec = 5
 
-    resourceArns = [analysisObj.arn]
+    resourceArns = [ analysis.arn for analysis in analysisObjList]
 
     if remap:
-        CloudFormationOverridePropertyConfiguration = generate_cloud_formation_override_list_AAB(analysisObj=analysisObj)
-        ret = qs.start_asset_bundle_export_job(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, AssetBundleExportJobId=EXPORT_JOB_ID, ResourceArns=resourceArns, IncludeAllDependencies=True,
+        CloudFormationOverridePropertyConfiguration = generate_cloud_formation_override_list_AAB(analysisObjList=analysisObjList)
+        ret = qs.start_asset_bundle_export_job(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, AssetBundleExportJobId=EXPORT_JOB_ID, ResourceArns=resourceArns, IncludeAllDependencies=True,
                                                 ExportFormat='CLOUDFORMATION_JSON', CloudFormationOverridePropertyConfiguration=CloudFormationOverridePropertyConfiguration)
     else:
-        ret = qs.start_asset_bundle_export_job(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, AssetBundleExportJobId=EXPORT_JOB_ID, ResourceArns=resourceArns, IncludeAllDependencies=True,
+        ret = qs.start_asset_bundle_export_job(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, AssetBundleExportJobId=EXPORT_JOB_ID, ResourceArns=resourceArns, IncludeAllDependencies=True,
                                                 ExportFormat='CLOUDFORMATION_JSON', ValidationStrategy={'StrictModeForAllResources':False})
 
     # Check progress
@@ -1289,7 +1580,7 @@ def replicate_dashboard_via_AAB(analysisObj:QSAnalysisDef, remap):
 
     while MAX_RETRIES > 0:
         MAX_RETRIES = MAX_RETRIES - 1
-        ret = qs.describe_asset_bundle_export_job(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, AssetBundleExportJobId=EXPORT_JOB_ID)
+        ret = qs.describe_asset_bundle_export_job(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, AssetBundleExportJobId=EXPORT_JOB_ID)
         if ret['JobStatus'] in EXPORT_TERMINAL_STATUSES:
             break
         print('Assets as Bundle export job with id {id} is currently in a non terminal status ({status}) waiting for {seconds} seconds'.format(id=EXPORT_JOB_ID, status=ret['JobStatus'], seconds=initial_wait_time_sec))
@@ -1301,8 +1592,8 @@ def replicate_dashboard_via_AAB(analysisObj:QSAnalysisDef, remap):
 
     downloadURL = ret['DownloadUrl']
 
-    json_filename = '{output_dir}/{analysis_id}_CFN_bundle.json'.format(output_dir=OUTPUT_DIR, analysis_id=analysisObj.id)
-    yaml_filename = '{output_dir}/{analysis_id}_CFN_bundle.yaml'.format(output_dir=OUTPUT_DIR, analysis_id=analysisObj.id)
+    json_filename = '{output_dir}/{export_job_id}_CFN_bundle.json'.format(output_dir=OUTPUT_DIR, export_job_id=EXPORT_JOB_ID)
+    yaml_filename = '{output_dir}/{export_job_id}_CFN_bundle.yaml'.format(output_dir=OUTPUT_DIR, export_job_id=EXPORT_JOB_ID)
 
     if downloadURL.lower().startswith('http'):
         ret = urlretrieve(downloadURL, json_filename)
@@ -1319,40 +1610,418 @@ def replicate_dashboard_via_AAB(analysisObj:QSAnalysisDef, remap):
 
     return source_account_yaml, dest_account_yaml
 
-def lambda_handler(event, context):
+# helper function that takes a cloudformation stack definition and returns a list of objects mapping the CFN resource Id and the QS resource Id
+def generate_resource_id_mapping(template_content:dict):
+    """
+    Helper function that takes a cloudformation stack definition and returns a list of objects mapping the CFN resource Id and the QS resource Id
 
-    calledViaEB = False
+    Parameters:
 
-    remap = REMAP_DS == 'true'
+    template_content(dict): Cloudformation stack definition in yaml
 
-    if 'source' in event and event['source'] == 'aws.quicksight':
-        print('Lambda function called via EventBridge')
-        calledViaEB = True
-        if 'resources' in event:
-            updated_dashboard_id = event['resources'].pop().split('dashboard/')[1]
-            if updated_dashboard_id != DASHBOARD_ID:
-                print('This lambda is configured to promote dashboard id {dashboard_id}, however the updated dashboard in event is {updated_dashboard_id}. Skipping ...'.format(dashboard_id=DASHBOARD_ID, updated_dashboard_id=updated_dashboard_id))
-                return {
-                    'statusCode': 200
+    Returns:
+
+    resourceIdMapping(List[dict]): List of objects mapping the CFN resource Id and the QS resource Id
+
+    Examples:
+
+    >>> generate_resource_id_mapping(template_content)
+
+    """
+    resourceIdMapping = []
+    template_resources = template_content['Resources']
+    RESOURCES_TO_MAP = ['AWS::QuickSight::DataSource', 'AWS::QuickSight::DataSet', 'AWS::QuickSight::Analysis', 'AWS::QuickSight::VPCConnection', 'AWS::QuickSight::Theme']
+
+    for resource_key in template_resources.keys():
+        resource = template_resources[resource_key]
+        if resource['Type'] in RESOURCES_TO_MAP:
+            resource_type = resource['Type'].split('::')[-1]
+            resource_map = {
+                'CFNId': resource_key,
+                'ResourceId': resource['Properties']['{resource_type}Id'.format(resource_type=resource_type)],
+                'ResourceType': resource_type
             }
+            resourceIdMapping.append(resource_map)
+
+    return resourceIdMapping
+
+# helper function that gets a CFNId reference and a resource_id_mapping object and returns the mapped resource
+def get_mapped_resource(cfnId:str, resource_id_mapping: dict):
+    """
+    Helper function that gets a CFNId reference and a resource_id_mapping object and returns the mapped resource
+
+    Parameters:
+
+    cfnId(str): CFNId reference
+    resource_id_mapping(List[dict]): List of objects mapping the CFN resource Id and the QS resource Id
+
+    Returns:
+
+    mappedResource(dict): Object mapping the CFN resource Id and the QS resource Id
+
+    Examples:
+
+    >>> get_mapped_resource(cfnId, resource_id_mapping)
+
+    """
+    mappedResource = [mapping for mapping in resource_id_mapping if mapping['CFNId'] == cfnId].pop()
+
+    return mappedResource
+
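To illustrate the rewriting performed by change_stack_references_to_ids below: an intra-stack Fn::GetAtt reference is replaced with an Fn::Sub ARN built from the mapped QuickSight resource id, so the referenced resource can live in a different nested stack. The ids here are illustrative:

    # before: reference to another resource in the same template
    before = {'DataSetArn': {'Fn::GetAtt': ['dataset4f81ad3701e14b1f9e13abc123', 'Arn']}}
    # after: self-contained ARN that resolves at deploy time
    after = {'DataSetArn': {'Fn::Sub': 'arn:${AWS::Partition}:quicksight:${AWS::Region}:${AWS::AccountId}:dataset/4f81ad37-01e1-4b1f-9e13-b02e4c6a2ff1'}}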
+# helper function that takes a cloudformation stack definition and replaces all CFN object references with Ids so it can be split
+def change_stack_references_to_ids(template_content:dict, resource_id_mapping: dict):
+    """
+    Helper function that takes a cloudformation stack definition and replaces all CFN object references with Ids so it can be split
+
+    Parameters:
+
+    template_content(dict): Cloudformation stack definition in yaml
+    resource_id_mapping(List[dict]): List of objects mapping the CFN resource Id and the QS resource Id
 
-            CONFIGURATION_FILES_PREFIX = '{pipeline_name}/ConfigFiles'
-            ASSETS_FILES_PREFIX = '{pipeline_name}/CFNTemplates'
+    Returns:
+
+    template_content(dict): Cloudformation stack definition in yaml with all references changed to ids
+
+    Examples:
+
+    >>> change_stack_references_to_ids(template_content, resource_id_mapping)
+
+    """
+    template_resources = template_content['Resources']
+    RESOURCES_TO_CHANGE = ['AWS::QuickSight::DataSource', 'AWS::QuickSight::DataSet', 'AWS::QuickSight::Analysis']
+    SUPPORTED_PHYSICAL_TABLE_TYPES = ['CustomSql','RelationalTable', 'S3Source']
+
+    for resource_key in template_resources.keys():
+        resource = template_resources[resource_key]
+        if resource['Type'] in RESOURCES_TO_CHANGE:
+            # Process analysis objects
+            if resource['Type'] == 'AWS::QuickSight::Analysis':
+                # replace DataSetArn references for each dataset
+                for datasetIdDeclaration in resource['Properties']['Definition']['DataSetIdentifierDeclarations']:
+                    expectedType = 'DataSet'
+                    referenceId = datasetIdDeclaration['DataSetArn']['Fn::GetAtt'][0]
+                    mappedResource = get_mapped_resource(referenceId, resource_id_mapping)
+                    # The dataset ids in ASSETS AS BUNDLE CFN output are sanitized by taking the first 20 chars of the resource id, removing '-' characters and appending a hash of 6 extra characters
+                    if mappedResource['ResourceType'] == expectedType:
+                        datasetIdDeclaration['DataSetArn'] = {
+                            'Fn::Sub' : 'arn:${{AWS::Partition}}:quicksight:${{AWS::Region}}:${{AWS::AccountId}}:dataset/{datasetId}'.format(datasetId=mappedResource['ResourceId'])
+                        }
+                    else:
+                        raise ValueError('Invalid Resource Type in resourceIdMapping object, expected type was {expected_type} but type in mapping for resource with id {resource_id} was {actual_type}'
+                                         .format(expected_type=expectedType, resource_id=mappedResource['ResourceId'], actual_type=mappedResource['ResourceType']))
+                if 'ThemeArn' in resource['Properties']:
+                    # Analysis has a ThemeArn reference that we need to replace
+                    expectedType = 'Theme'
+                    referenceId = resource['Properties']['ThemeArn']['Fn::GetAtt'][0]
+                    mappedResource = get_mapped_resource(referenceId, resource_id_mapping)
+                    if mappedResource['ResourceType'] == expectedType:
+                        resource['Properties']['ThemeArn'] = {
+                            'Fn::Sub' : 'arn:${{AWS::Partition}}:quicksight:${{AWS::Region}}:${{AWS::AccountId}}:theme/{themeId}'.format(themeId=mappedResource['ResourceId'])
+                        }
+                    else:
+                        raise ValueError('Invalid Resource Type in resourceIdMapping object, expected type was {expected_type} but type in mapping for resource with id {resource_id} was {actual_type}'
+                                         .format(expected_type=expectedType, resource_id=mappedResource['ResourceId'], actual_type=mappedResource['ResourceType']))
+
+            # Process data sources objects that contain a VPC Connection
+            if resource['Type'] == 'AWS::QuickSight::DataSource' and 'VpcConnectionProperties' in resource['Properties']:
+                expectedType = 'VPCConnection'
+                vpc_connection_properties = resource['Properties']['VpcConnectionProperties']
+                referenceId = vpc_connection_properties['VpcConnectionArn']['Fn::GetAtt'][0]
+                mappedResource = get_mapped_resource(referenceId, resource_id_mapping)
+                if mappedResource['ResourceType'] == expectedType:
+                    vpc_connection_properties['VpcConnectionArn'] = {
+                        'Fn::Sub' : 'arn:${{AWS::Partition}}:quicksight:${{AWS::Region}}:${{AWS::AccountId}}:vpcConnection/{vpc_connection_id}'.format(vpc_connection_id=mappedResource['ResourceId'])
+                    }
+                else:
+                    raise ValueError('Invalid Resource Type in resourceIdMapping object, expected type was {expected_type} but type in mapping for resource with id {resource_id} was {actual_type}'
+                                     .format(expected_type=expectedType, resource_id=mappedResource['ResourceId'], actual_type=mappedResource['ResourceType']))
+
+            # Process dataset objects
+            if resource['Type'] == 'AWS::QuickSight::DataSet':
+                datasetId = resource['Properties']['DataSetId']
+
+                expectedType = 'DataSource'
+                for physicalTableMapObjectKey in resource['Properties']['PhysicalTableMap'].keys():
+                    physicalTableTypeKeys = resource['Properties']['PhysicalTableMap'][physicalTableMapObjectKey].keys()
+                    physicalTableType = list(physicalTableTypeKeys)[0]
+                    referenceId = resource['Properties']['PhysicalTableMap'][physicalTableMapObjectKey][physicalTableType]['DataSourceArn']['Fn::GetAtt'][0]
+                    mappedResource = get_mapped_resource(referenceId, resource_id_mapping)
+                    if physicalTableType in SUPPORTED_PHYSICAL_TABLE_TYPES:
+                        if mappedResource['ResourceType'] == expectedType:
+                            resource['Properties']['PhysicalTableMap'][physicalTableMapObjectKey][physicalTableType]['DataSourceArn'] = {
+                                'Fn::Sub' : 'arn:${{AWS::Partition}}:quicksight:${{AWS::Region}}:${{AWS::AccountId}}:datasource/{datasource_id}'.format(datasource_id=mappedResource['ResourceId'])
+                            }
+                        else:
+                            raise ValueError('Invalid Resource Type in resourceIdMapping object, expected type was {expected_type} but type in mapping for resource with id {resource_id} was {actual_type}'
+                                             .format(expected_type=expectedType, resource_id=mappedResource['ResourceId'], actual_type=mappedResource['ResourceType']))
+                    else:
+                        raise ValueError('Unsupported Physical Table Type in CFN template, supported types are {supported_types} but type {actual_type} is used in dataset {dataset_id}'
+                                         .format(supported_types=SUPPORTED_PHYSICAL_TABLE_TYPES, actual_type=physicalTableType, dataset_id=datasetId))
+
+    return template_content
+
+# helper function that returns the group where a given resource id is located in a grouped_resources_content
+def get_resource_group(resource_id:str, grouped_resources_content:dict):
+    """
+    Helper function that returns the group where a given resource id is located in a grouped_resources_content
+
+    Parameters:
+
+    resource_id(str): Resource id to find in grouped_resources_content
+    grouped_resources_content(dict): Dictionary of grouped resources to generate nested stacks
+
+    Returns:
+
+    group(str): Group where resource_id is located in grouped_resources_content
+
+    Examples:
+
+    >>> get_resource_group(resource_id, grouped_resources_content)
+
+    """
+    sanitized_id = resource_id.replace('-', '')
+    # print('Looking for resource id:'+sanitized_id)
+    # print('Grouped resources content is:')
+    # print(grouped_resources_content)
+    for group in grouped_resources_content.keys():
+        for CFNresourceId in grouped_resources_content[group]['Resources'].keys():
+            if sanitized_id[:20] in CFNresourceId:
+                return group
+
+    return None
+
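get_resource_group above leans on the sanitization convention noted earlier (CFN logical ids in the asset-bundle output embed the first 20 characters of the QuickSight resource id with dashes removed). A standalone restatement of that membership test, with an illustrative id:

    def matches_logical_id(resource_id: str, cfn_logical_id: str) -> bool:
        # mirrors the check used by get_resource_group above
        return resource_id.replace('-', '')[:20] in cfn_logical_id

    assert matches_logical_id('4f81ad37-01e1-4b1f-9e13-b02e4c6a2ff1', 'dataset4f81ad3701e14b1f9e13abc123')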
+# helper function that takes a cloudformation stack definition in yaml and splits its resources into nested stacks
+def split_stack_resources_and_parameters_into_groups(template_content:dict):
+    """
+    Helper function that takes a cloudformation stack definition in yaml and splits its resources into groups based on configuration; the parameters required by each group of resources are collected and returned as well
+
+    Parameters:
+
+    template_content(dict): Cloudformation stack definition in yaml
+
+    Returns:
+
+    grouped_resources_content(dict): Dictionary of grouped resources to generate nested stacks
+    grouped_parameters_content(dict): Dictionary of grouped parameters to generate nested stacks
+
+    Examples:
+
+    >>> split_stack_resources_and_parameters_into_groups(template_content)
+
+    """
+    resources_to_split = {
+        'datasources' : ['AWS::QuickSight::DataSource'],
+        'datasets' : ['AWS::QuickSight::DataSet'],
+        'analysis' : ['AWS::QuickSight::Analysis'],
+        'vpcConnections' : ['AWS::QuickSight::VPCConnection', 'AWS::QuickSight::Theme']
+    }
+
+    colocated_resources = {
+        'AWS::QuickSight::RefreshSchedule': 'datasets'
+    }
+
+    grouped_resources_content = {}
+    grouped_parameters_content = {}
+
+    parameters_mapping = {
+        'datasources' : ['DstQSAdminRegion', 'QSUser'],
+        'datasets' : ['DstQSAdminRegion', 'QSUser'],
+        'analysis' : ['DstQSAdminRegion', 'QSUser'],
+        'vpcConnections' : ['DstQSAdminRegion', 'QSUser']
+    }
+
+    if REPLICATION_METHOD == 'TEMPLATE':
+        parameters_mapping['analysis'] = parameters_mapping['analysis'] + ['SrcQSRegion','SourceAccountID']
+
+    template_resources = template_content['Resources']
+    template_parameters = template_content['Parameters']
+    added_parameters = ['DstQSAdminRegion', 'QSUser']
+
+    MAX_RESOURCES_PER_GROUP = 10
+    for resource_type in resources_to_split.keys():
+        group_index = 0
+        for resource_key in template_resources.keys():
+            resource = template_resources[resource_key]
+            # Remove DependsOn elements as dependencies are now managed between the nested stacks, so they are no longer needed except if the resource type is a colocated resource ...
+            if 'DependsOn' in resource.keys() and resource['Type'] not in colocated_resources:
+                del(resource['DependsOn'])
+            if resource['Type'] in resources_to_split[resource_type]:
+                resource_index = '{resource_type}_{index}'.format(resource_type=resource_type, index=group_index)
+                if resource_index in grouped_resources_content.keys() and len(grouped_resources_content[resource_index]['Resources']) >= MAX_RESOURCES_PER_GROUP:
+                    group_index = group_index + 1
+                    resource_index = '{resource_type}_{index}'.format(resource_type=resource_type, index=group_index)
+
+                if resource_index not in grouped_resources_content.keys():
+                    grouped_resources_content[resource_index] = {}
+                    grouped_resources_content[resource_index]['Resources'] = {}
+                    grouped_parameters_content[resource_index] = {}
+                    grouped_parameters_content[resource_index]['Parameters'] = {}
+
+                grouped_resources_content[resource_index]['Resources'][resource_key] = resource
+                # adding grouped parameters
+                for parameter in parameters_mapping[resource_type]:
+                    grouped_parameters_content[resource_index]['Parameters'][parameter] = template_parameters[parameter]
+
+                if resource_type == 'datasources':
+                    # we need to add datasource parameters
+                    datasourceId = resource['Properties']['DataSourceId']
+                    parameter_list = [{key: template_parameters[key]} for key in list(template_parameters) if 'datasource:{datasource_id}'.format(datasource_id=datasourceId) in template_parameters[key]['Description']]
+                    for parameter in parameter_list:
+                        param_key = list(parameter.keys())[0]
+                        grouped_parameters_content[resource_index]['Parameters'][param_key] = parameter[param_key]
+                        added_parameters.append(param_key)
+                if resource_type == 'vpcConnections' and resource['Type'] == 'AWS::QuickSight::VPCConnection':
+                    # we need to add vpc connection parameters
+                    vpcConnectionId = resource['Properties']['VPCConnectionId']
+                    parameter_list = [{key: template_parameters[key]} for key in list(template_parameters) if 'vpcConnection:{vpc_connection_id}'.format(vpc_connection_id=vpcConnectionId) in template_parameters[key]['Description']]
+                    for parameter in parameter_list:
+                        param_key = list(parameter.keys())[0]
+                        grouped_parameters_content[resource_index]['Parameters'][param_key] = parameter[param_key]
+                        added_parameters.append(param_key)
+
+            elif resource['Type'] in colocated_resources and resource_type == colocated_resources[resource['Type']]:
+                # needs to be colocated and is a RefreshSchedule
+                if resource['Type'] == 'AWS::QuickSight::RefreshSchedule':
+                    # we need to add to the datasets group
+                    datasetId = resource['Properties']['DataSetId']
+                    refreshScheduleId = resource['Properties']['Schedule']['ScheduleId']
+                    target_group = get_resource_group(datasetId, grouped_resources_content)
+                    grouped_resources_content[target_group]['Resources'][resource_key] = resource
+                    parameter_list = [{key: template_parameters[key]} for key in list(template_parameters) if 'refresh-schedule:{schedule_id}'.format(schedule_id=refreshScheduleId) in template_parameters[key]['Description']]
+                    for parameter in parameter_list:
+                        param_key = list(parameter.keys())[0]
+                        grouped_parameters_content[target_group]['Parameters'][param_key] = parameter[param_key]
+                        added_parameters.append(param_key)
+
+
+    return grouped_resources_content, grouped_parameters_content
+
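generate_nested_stacks_from_grouped_resources below uploads each nested template to S3 and wires it into the parent stack through a presigned TemplateURL. The generatePresignedUrl helper it calls is not part of this diff; a plausible sketch under that assumption:

    import boto3

    def generatePresignedUrl(key, bucket, region, credentials, expires_in=3600):
        # hypothetical sketch; presigns the uploaded nested template so
        # CloudFormation can fetch it from the deployment bucket
        s3 = boto3.client('s3', region_name=region,
                          aws_access_key_id=credentials['AccessKeyId'],
                          aws_secret_access_key=credentials['SecretAccessKey'],
                          aws_session_token=credentials['SessionToken'])
        return s3.generate_presigned_url('get_object', Params={'Bucket': bucket, 'Key': key}, ExpiresIn=expires_in)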
+# helper function that takes a dictionary of grouped resources, creates a cloudformation stack for each of the groups and persists each stack file in yaml format locally
+def generate_nested_stacks_from_grouped_resources(grouped_resources_content:dict, grouped_parameters_content:dict, credentials:object):
+    """
+    Helper function that takes a dictionary of grouped resources, creates a cloudformation stack for each of the groups and persists each stack file in yaml format locally
+
+    Parameters:
+
+    grouped_resources_content(dict): Dictionary containing groups of resources
+    grouped_parameters_content(dict): Dictionary containing groups of parameters
+    credentials(object): Credentials object containing the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY to use with S3
+
+    Returns:
+
+    root_stack_skel(dict): The root template content in pyyaml format
+
+    Examples:
+
+    >>> generate_nested_stacks_from_grouped_resources(grouped_resources_content, grouped_parameters_content, credentials)
+
+    """
+
+    dependencies = {
+        'analysis': ['datasets'],
+        'datasets': ['datasources'],
+        'datasources': ['vpcConnections'],
+        'vpcConnections': []
+    }
+
+    parent_stack_skel = {
+        'AWSTemplateFormatVersion': '2010-09-09',
+        'Description': 'Parent Stack for QuickSight CI/CD Pipeline {pipeline_name}, nested stacks are being generated because GENERATE_NESTED_STACKS was set to True in the synthesizer lambda function'.format(pipeline_name=PIPELINE_NAME),
+        'Resources': {},
+    }
+
+    nested_stack_skel = {
+        'AWSTemplateFormatVersion': '2010-09-09',
+        'Resources': {},
+        'Description': 'Nested Stack for {group_name}'
+    }
+
+    all_parameters = {}
+
+
+    for group in grouped_parameters_content:
+        group_parameters = grouped_parameters_content[group]['Parameters']
+        for parameter_key in group_parameters.keys():
+            all_parameters[parameter_key] = group_parameters[parameter_key]
 
-    replication_handler = None
 
-    ## Create Analysis Object
-    ret = qs.describe_dashboard(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DashboardId=DASHBOARD_ID)
+    for group in grouped_resources_content.keys():
+        # NESTED STACK TEMPLATES GENERATION
+        # process the grouped resources content and create one nested stack for each group
+        group_resources = grouped_resources_content[group]['Resources']
+        group_parameters = grouped_parameters_content[group]['Parameters']
+        nested_stack_skel['Resources'] = group_resources
+        nested_stack_skel['Description'] = 'Nested Stack for {group_name}'.format(group_name=group)
+        nested_stack_skel['Parameters'] = group_parameters
+
+        # write the nested stack skeleton to a file and upload it to a bucket
+        filename = '{group_name}.template'.format(group_name=group)
+        nested_stack_filename = '{output_dir}/{filename}'.format(output_dir=OUTPUT_DIR, filename=filename)
+        writeToFile(nested_stack_filename, nested_stack_skel, format="yaml")
+        uploadFileToS3(bucket=DEPLOYMENT_S3_BUCKET, filename=nested_stack_filename, region=AWS_REGION, object_name=filename, prefix=ASSETS_FILES_PREFIX, bucket_owner=DEPLOYMENT_ACCOUNT_ID, credentials=credentials)
+
+        # PARENT TEMPLATE GENERATION
+        # process the grouped resources content and create one nested stack for each group in the parent template
+        nested_stack_id = 'nestedStack{group_name}'.format(group_name=group.replace('_', ''))
+        depending_groups = [ 'nestedStack{group_name}'.format(group_name=x.replace('_','')) for x in grouped_resources_content if x.split('_')[0] in dependencies[group.split('_')[0]]]
+        parameters = {}
+        filename = '{group_name}.template'.format(group_name=group)
+        key= '{prefix}/{key}'.format(prefix=ASSETS_FILES_PREFIX, key=filename)
+        # generate a presigned URL
+        presignedUrl = generatePresignedUrl(key=key, bucket=DEPLOYMENT_S3_BUCKET, region=AWS_REGION, credentials=credentials)
+        templateUrlSkel = 'http://s3.amazonaws.com/{s3Bucket}/{prefix}/{group_name}.template'
+        for parameterKey in grouped_parameters_content[group]['Parameters'].keys():
+            parameters[parameterKey] = {
+                'Ref' : parameterKey
+            }
+
+        parent_stack_skel['Resources'][nested_stack_id] = {
+            'Type' : 'AWS::CloudFormation::Stack',
+            'Properties' : {
+                'TemplateURL' : presignedUrl,
+                'Parameters' : parameters
+            }
+        }
+
+        if len(depending_groups) > 0:
+            parent_stack_skel['Resources'][nested_stack_id]['DependsOn'] = depending_groups
+
+    parent_stack_skel['Parameters'] = all_parameters
+
+    return parent_stack_skel
+
+# Helper function that creates a QSAnalysisDef object from the analysis that originated the dashboard ID passed as argument, this object will then be used to generate a cloudformation template to build such analysis
+def getAnalysisAssociatedWithDashboard(dashboardId, ds_index):
+    """
+    Helper function that creates a QSAnalysisDef object from the analysis that originated the dashboard ID passed as argument, this object will then be used to generate a cloudformation template to build such analysis
+
+    Parameters:
+
+    dashboardId(String): Dashboard ID
+    ds_index(Integer): Start index (within the generated CFN template) to use for the first (and subsequent) analysis datasources
+
+    Returns:
+
+    analysisObj(QSAnalysisDef): Object encapsulating all the information and depending assets from the analysis
+    ds_index(Integer): Index (within the generated CFN template) of the last datasource in the analysis
+
+    Examples:
+
+    >>> getAnalysisAssociatedWithDashboard(dashboardId, ds_index)
+
+    """
+
+    ret = qs.describe_dashboard(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DashboardId=dashboardId)
 
     source_analysis_arn = ret['Dashboard']['Version']['SourceEntityArn']
     analysis_id = source_analysis_arn.split('analysis/')[1]
 
-    ret = qs.describe_analysis(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, AnalysisId=analysis_id)
+    ret = qs.describe_analysis(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, AnalysisId=analysis_id)
 
     dataset_arns = ret['Analysis']['DataSetArns']
 
-    ds_count = 0
+    ds_count = ds_index
 
     datasourceDefObjList = []
     datasetsDefObjList = []
 
     analysis_name = ret['Analysis']['Name']
 
-    permissions = qs.describe_analysis_permissions(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, AnalysisId=analysis_id)
+    permissions = qs.describe_analysis_permissions(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, AnalysisId=analysis_id)
 
     owner = permissions['Permissions'].pop()
 
     username = owner['Principal'].split('default/')
     qs_admin_region = owner['Principal'].split(':')[3]
@@ -1367,13 +2036,13 @@
         dset_datasources = []
         datasourceDefObjList = []
         datasetId = datasetarn.split('dataset/')[-1]
-        ret = qs.describe_data_set(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=datasetId )
+        ret = qs.describe_data_set(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=datasetId )
         physicalTableKeys= get_physical_table_map_object(ret['DataSet']['PhysicalTableMap'])
         importMode = None
         if ret['DataSet']['ImportMode'] == ImportMode.SPICE.name:
            importMode = ImportMode.SPICE
-            ret_refresh_schedules = qs.list_refresh_schedules(AwsAccountId=SOURCE_AWS_ACCOUNT_ID, DataSetId=datasetId)
+            ret_refresh_schedules = qs.list_refresh_schedules(AwsAccountId=FIRST_STAGE_ACCOUNT_ID, DataSetId=datasetId)
         else:
            importMode = ImportMode.DIRECT_QUERY
         datasetObj = QSDataSetDef(name=ret['DataSet']['Name'], id=datasetId, importMode=importMode, placeholdername=ret['DataSet']['Name'], refreshSchedules=ret_refresh_schedules, physicalTableMap=physicalTableKeys)
@@ -1403,7 +2072,8 @@ def lambda_handler(event, context):
 
             rls_dataset_ids.append(ret['DataSet']['RowLevelPermissionDataSet']['Arn'].split('dataset/')[-1])
 
-    analysis = QSAnalysisDef(name=analysis_name, arn=analysis_arn,QSAdminRegion=qs_admin_region, QSRegion=analysis_region, QSUser=username, AccountId=SOURCE_AWS_ACCOUNT_ID, TemplateId=SOURCE_TEMPLATE_ID, PipelineName=PIPELINE_NAME)
+    analysis = QSAnalysisDef(name=analysis_name, arn=analysis_arn,QSAdminRegion=qs_admin_region, QSRegion=analysis_region, QSUser=username, AccountId=FIRST_STAGE_ACCOUNT_ID, PipelineName=PIPELINE_NAME,
+                             AssociatedDashboardId=dashboardId)
 
     analysis.datasets = datasetsDefObjList
 
     #Now we need to tag RLS datasets to make sure they are not included in Analysis template definition
@@ -1412,31 +2082,72 @@ def lambda_handler(event, context):
         rls_dset_obj = analysis.getDatasetById(dataset_id)
         rls_dset_obj.isRLS = True
 
+    return analysis, ds_count
+
+def lambda_handler(event, context):
+
+    calledViaEB = False
+
+    remap = REMAP_DS == 'true'
+    generate_nested_stacks = GENERATE_NESTED_STACKS == 'true'
+
+    print("Execution MODE is {mode}".format(mode=MODE))
+
+    replication_handler = None
+    credentials = assumeRoleInDeplAccount(role_arn=DEPLOYMENT_DEV_ACCOUNT_ROLE_ARN)
+
+    asset_id_list = read_all_assetIds_from_dynamo(region=AWS_REGION, credentials=credentials)
+
+    # Validate that each asset on the list is actually a Dashboard
+    for asset_id in asset_id_list:
+        if not validate_asset_id(assetId=asset_id, region=AWS_REGION):
+            return {
+                'statusCode': 500,
+                'body': 'Asset id {asset_id} is not a dashboard, at the moment only QuickSight dashboards are supported in this pipeline, please fix this and retry ...'.format(asset_id=asset_id)
+            }
+
+    analysisObjList = []
+
+    source_account_yaml = {}
+    dest_account_yaml = {}
+    ds_index = 0
+
+    # Now that we are sure that all the assets on the list are dashboards, we can create a list of QSAnalysisDef objects with each of their originating analyses.
+    for dashboardId in asset_id_list:
+        analysisObj, ds_index = getAnalysisAssociatedWithDashboard(dashboardId=dashboardId, ds_index=ds_index)
+        analysisObjList.append(analysisObj)
 
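The EventBridge branch that follows expects an event of roughly this shape; the ids and the detail-type string below are illustrative, not captured from a real event:

    sample_event = {
        'source': 'aws.quicksight',
        'detail-type': 'QuickSight Dashboard Version Published',  # illustrative
        'resources': ['arn:aws:quicksight:us-east-1:111122223333:dashboard/sales-dashboard-id']
    }
    # event['resources'].pop().split('dashboard/')[1] then yields 'sales-dashboard-id'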
+    if 'source' in event and event['source'] == 'aws.quicksight':
+        print('Lambda function called via EventBridge')
+        calledViaEB = True
+        if 'resources' in event:
+            updated_dashboard_id = event['resources'].pop().split('dashboard/')[1]
+
+            if updated_dashboard_id not in asset_id_list:
+                print('This lambda is configured to promote dashboards configured in the DDB table {table_name} whose ids are {dashboard_ids}, however the updated dashboard in event is {updated_dashboard_id}. Skipping ...'
+                      .format(table_name=TRACKED_ASSETS_TABLE_NAME, dashboard_ids=asset_id_list, updated_dashboard_id=updated_dashboard_id))
+                return {
+                    'statusCode': 200
+                }
 
     if REPLICATION_METHOD == 'TEMPLATE':
         replication_handler = replicate_dashboard_via_template
     elif REPLICATION_METHOD == 'ASSETS_AS_BUNDLE':
         replication_handler = replicate_dashboard_via_AAB
+
+    source_account_yaml, dest_account_yaml = replication_handler(analysisObjList, remap)
 
-    try:
-        source_account_yaml, dest_account_yaml = replication_handler(analysis, remap)
-
-        if REPLICATION_METHOD == 'ASSETS_AS_BUNDLE':
-            dest_account_yaml = add_permissions_to_AAB_resources(dest_account_yaml)
+    if REPLICATION_METHOD == 'ASSETS_AS_BUNDLE':
+        dest_account_yaml = add_permissions_to_AAB_resources(dest_account_yaml)
 
-        QSSourceAssetsFilename = '{output_dir}/QStemplate_CFN_SOURCE.yaml'.format(output_dir=OUTPUT_DIR)
+    try:
+        QSSourceAssetsFilename = '{output_dir}/QS_assets_CFN_SOURCE.yaml'.format(output_dir=OUTPUT_DIR)
         writeToFile(filename=QSSourceAssetsFilename, content=source_account_yaml)
-
+
         QSDestAssetsFilename = '{output_dir}/QS_assets_CFN_DEST.yaml'.format(output_dir=OUTPUT_DIR)
         writeToFile(filename=QSDestAssetsFilename, content=dest_account_yaml)
 
-        # Upload assets to S3 in deployment account
-
-        credentials = assumeRoleInDeplAccount(role_arn=DEPLOYMENT_DEV_ACCOUNT_ROLE_ARN)
-
-        CONFIGURATION_FILES_PREFIX = CONFIGURATION_FILES_PREFIX.format(pipeline_name=PIPELINE_NAME)
-        ASSETS_FILES_PREFIX = ASSETS_FILES_PREFIX.format(pipeline_name=PIPELINE_NAME)
-
         source_param_list = generate_cloudformation_template_parameters(template_content=source_account_yaml)
         dest_param_list = generate_cloudformation_template_parameters(template_content=dest_account_yaml)
@@ -1446,25 +2157,32 @@ def lambda_handler(event, context):
             print("{mode} was requested, generating sample configuration files in {config_files_prefix} prefix on {bucket} in the deployment account {deployment_account} to be filled with \
                   parametrized values for each environment".format(mode=MODE, config_files_prefix=CONFIGURATION_FILES_PREFIX, bucket=DEPLOYMENT_S3_BUCKET, deployment_account=DEPLOYMENT_ACCOUNT_ID))
-
+
+            source_param_help = summarize_template(template_content=source_account_yaml, templateName="SourceAssets", s3Credentials=credentials, conf_files_prefix=CONFIGURATION_FILES_PREFIX)
+            dest_param_help = summarize_template(template_content=dest_account_yaml, templateName="DestinationAssets", s3Credentials=credentials, conf_files_prefix=CONFIGURATION_FILES_PREFIX)
+
             for stage in deployment_stages:
                 source_assets_param_file_path = writeToFile('{output_dir}/source_cfn_template_parameters_{stage}.txt'.format(output_dir=OUTPUT_DIR, stage=stage.strip()), content=source_param_list, format='json')
                 uploadFileToS3(bucket=DEPLOYMENT_S3_BUCKET, filename=source_assets_param_file_path, prefix=CONFIGURATION_FILES_PREFIX, region=DEPLOYMENT_S3_REGION, bucket_owner=DEPLOYMENT_ACCOUNT_ID, credentials=credentials)
 
                 dest_assets_param_file_path = writeToFile('{output_dir}/dest_cfn_template_parameters_{stage}.txt'.format(output_dir=OUTPUT_DIR, stage=stage.strip()), content=dest_param_list, format='json')
                 uploadFileToS3(bucket=DEPLOYMENT_S3_BUCKET, filename=dest_assets_param_file_path, prefix=CONFIGURATION_FILES_PREFIX, bucket_owner=DEPLOYMENT_ACCOUNT_ID, region=DEPLOYMENT_S3_REGION, credentials=credentials)
 
-        summarize_template(template_content=source_account_yaml, templateName="SourceAssets", s3Credentials=credentials, conf_files_prefix=CONFIGURATION_FILES_PREFIX)
-        summarize_template(template_content=dest_account_yaml, templateName="DestinationAssets", s3Credentials=credentials, conf_files_prefix=CONFIGURATION_FILES_PREFIX)
+                # Store parameter definition initialization for each stage in DDB tables
+                #source Params
+                store_dashboard_parameter_definition_in_dynamo(table_name=PARAMETER_DEFINITION_TABLE_NAME, assetType="source", stage=stage.strip(), parameter_definition=json.dumps(source_param_list, indent=2),
+                                                               parameter_help=json.dumps(source_param_help, indent=2), region=AWS_REGION, credentials=credentials)
+                #dest Params
+                store_dashboard_parameter_definition_in_dynamo(table_name=PARAMETER_DEFINITION_TABLE_NAME, assetType="dest", stage=stage.strip(), parameter_definition=json.dumps(dest_param_list, indent=2),
+                                                               parameter_help=json.dumps(dest_param_help, indent=2), region=AWS_REGION, credentials=credentials)
+
         elif calledViaEB or (MODE == 'DEPLOY'):
             try:
-                for stage in deployment_stages:
-                    print("Checking source CFN parameters in CFN template for {stage}".format(stage=stage))
-                    check_parameters_cloudformation(param_list=source_param_list, key='{prefix}/source_cfn_template_parameters_{stage}.txt'.format(prefix=CONFIGURATION_FILES_PREFIX, stage=stage.strip()), region=AWS_REGION, credentials=credentials)
-                    print("Checking dest CFN parameters in CFN template for {stage}".format(stage=stage))
-                    check_parameters_cloudformation(param_list=dest_param_list, key='{prefix}/dest_cfn_template_parameters_{stage}.txt'.format(prefix=CONFIGURATION_FILES_PREFIX, stage=stage.strip()), region=AWS_REGION, credentials=credentials)
+                check_parameters_cloudformation(template_param_list=source_param_list, region=AWS_REGION, credentials=credentials, assetType="source")
+
+                check_parameters_cloudformation(template_param_list=dest_param_list, region=AWS_REGION, credentials=credentials, assetType="dest")
 
             except ValueError as error:
-                print('There was an issue with the CFN parameters file for stage, correct your CFN parameter file or run the function again with MODE: ''INITIALIZE''')
+                print('There was an issue with the CFN parameters, correct your parameter definition records in DynamoDB or run the function again with MODE: ''INITIALIZE''')
                 raise ValueError(error)
@@ -1473,7 +2191,7 @@ def lambda_handler(event, context):
 
             # Create source artifact file
             zip_file = '{output_dir}/SOURCE_assets_CFN.zip'.format(output_dir=OUTPUT_DIR)
-
+
             source_files = get_s3_objects(bucket=DEPLOYMENT_S3_BUCKET, prefix='{config_files_prefix}/source_cfn_template_parameters_'.format(config_files_prefix=CONFIGURATION_FILES_PREFIX), region=DEPLOYMENT_S3_REGION, credentials=credentials)
             source_files.append(QSSourceAssetsFilename)
@@ -1481,6 +2199,16 @@ def lambda_handler(event, context):
 
             # Create dest artifact file
             zip_file = '{output_dir}/DEST_assets_CFN.zip'.format(output_dir=OUTPUT_DIR)
+
+            if generate_nested_stacks:
+                if REPLICATION_METHOD == 'ASSETS_AS_BUNDLE':
+                    resource_id_mapping = generate_resource_id_mapping(template_content=dest_account_yaml)
+                    updated_dest_account_yaml = change_stack_references_to_ids(template_content=dest_account_yaml, resource_id_mapping=resource_id_mapping)
+                    grouped_resources_content, grouped_parameters_content = split_stack_resources_and_parameters_into_groups(updated_dest_account_yaml)
+                else:
+                    grouped_resources_content, grouped_parameters_content = split_stack_resources_and_parameters_into_groups(dest_account_yaml)
+                parent_dest_stack_yaml = generate_nested_stacks_from_grouped_resources(grouped_resources_content=grouped_resources_content, grouped_parameters_content=grouped_parameters_content, credentials=credentials)
+                writeToFile(filename=QSDestAssetsFilename, content=parent_dest_stack_yaml)
 
             dest_files = get_s3_objects(bucket=DEPLOYMENT_S3_BUCKET, prefix='{config_files_prefix}/dest_cfn_template_parameters_'.format(config_files_prefix=CONFIGURATION_FILES_PREFIX), region=DEPLOYMENT_S3_REGION, credentials=credentials)
             dest_files.append(QSDestAssetsFilename)
@@ -1489,10 +2217,10 @@ def lambda_handler(event, context):
     except ValueError as error:
         return {
             'statusCode': 500,
-            'error': str(error)
+            'error': str(error)
         }
 
     return {
-        'statusCode': 200
+        'statusCode': 200
     }
\ No newline at end of file
diff --git a/source/lambda/qs_assets_CFN_synthesizer/helpers/analysis.py b/source/lambda/qs_assets_CFN_synthesizer/helpers/analysis.py
index e03a45c..6c2318f 100644
--- a/source/lambda/qs_assets_CFN_synthesizer/helpers/analysis.py
+++ b/source/lambda/qs_assets_CFN_synthesizer/helpers/analysis.py
@@ -1,6 +1,7 @@
 class QSAnalysisDef:
     name =''
     id = ''
+    arn = ''
     CFNId = ''
     datasets = {}
     QSUser = ''
@@ -9,8 +10,9 @@ class QSAnalysisDef:
     AccountId = ''
     TemplateId = ''
     PipelineName = ''
+    AssociatedDashboardId = ''
 
-    def __init__(self, name: str, arn: str, QSUser:str, QSRegion:str, QSAdminRegion:str, AccountId:str, TemplateId:str, PipelineName:str):
+    def __init__(self, name: str, arn: str, QSUser:str, QSRegion:str, QSAdminRegion:str, AccountId:str, PipelineName:str, AssociatedDashboardId: str):
         self.name = name
         self.arn = arn
         self.id = arn.split('analysis/')[-1]
@@ -19,8 +21,9 @@ def __init__(self, name: str, arn: str, QSUser:str, QSRegion:str, QSAdminRegion:
         self.QSRegion = QSRegion
         self.QSAdminRegion = QSAdminRegion
         self.AccountId = AccountId
-        self.TemplateId = TemplateId
+        self.TemplateId = '{analysis_name}-template'.format(analysis_name=name.replace(' ', '-'))
         self.PipelineName = PipelineName
+        self.AssociatedDashboardId = AssociatedDashboardId
 
     def getDependingDatasets(self):
diff --git a/source/lambda/qs_assets_CFN_synthesizer/helpers/datasources.py b/source/lambda/qs_assets_CFN_synthesizer/helpers/datasources.py
index db5fde4..b011fe6 100644
--- a/source/lambda/qs_assets_CFN_synthesizer/helpers/datasources.py
+++ b/source/lambda/qs_assets_CFN_synthesizer/helpers/datasources.py
@@ -57,6 +57,7 @@ class QSRDSDatasourceDef(QSDataSourceDef):
     type = ''
     vpcConnectionArn = ''
     secretArn = ''
+    parameters = {}
 
     def __init__(self, name: str, arn: str, parameters: object, type: SourceType, index: int):
         if 'VpcConnectionArn' in parameters:
@@ -67,6 +68,7 @@ def __init__(self, name: str, arn: str, parameters: object, type: SourceType, i
         self.database = parameters['Database']
         if 'SecretArn' in parameters:
             self.secretArn = parameters['SecretArn']
+        self.parameters = parameters
         self.type = type
 
         super().__init__(name, arn, index)
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/datasource_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/datasource_resource_CFN_skel.yaml
similarity index 100%
rename from source/lambda/qs_assets_CFN_synthesizer/resources/datasource_CFN_skel.yaml
rename to source/lambda/qs_assets_CFN_synthesizer/resources/datasource_resource_CFN_skel.yaml
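A quick illustration of the revised QSAnalysisDef constructor in the analysis.py hunk above: TemplateId is now derived from the analysis name instead of being passed in. Values below are illustrative:

    # assuming: from helpers.analysis import QSAnalysisDef
    analysis = QSAnalysisDef(name='Sales Overview',
                             arn='arn:aws:quicksight:us-east-1:111122223333:analysis/abc-123',
                             QSUser='qs-admin', QSRegion='us-east-1', QSAdminRegion='us-east-1',
                             AccountId='111122223333', PipelineName='qs-pipeline',
                             AssociatedDashboardId='sales-dashboard-id')
    assert analysis.TemplateId == 'Sales-Overview-template'
    assert analysis.id == 'abc-123'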
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/dest_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/dest_CFN_skel.yaml
index 7ed9841..fa36db1 100644
--- a/source/lambda/qs_assets_CFN_synthesizer/resources/dest_CFN_skel.yaml
+++ b/source/lambda/qs_assets_CFN_synthesizer/resources/dest_CFN_skel.yaml
@@ -3,7 +3,7 @@ Description: 'Automated deployment of QuickSight Assets.'
 Parameters:
   SourceAccountID:
-    Description: Account ID where the QuickSight template resides (source account)
+    Description: Account ID where the QuickSight template resides, this is the previous stage account (e.g. if setting parameters for the PRE stage it should be the AWS account used for Development workloads)
     Type: String
   QSUser:
     Description: QS Username in Account where the assets will be created
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/dest_parent_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/dest_parent_CFN_skel.yaml
new file mode 100644
index 0000000..d5403c1
--- /dev/null
+++ b/source/lambda/qs_assets_CFN_synthesizer/resources/dest_parent_CFN_skel.yaml
@@ -0,0 +1,22 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: 'Automated deployment of QuickSight Assets.'
+
+Resources:
+  datasourceStack:
+    Type: 'AWS::CloudFormation::Stack'
+    DeletionPolicy: Retain
+    Properties:
+      TemplateURL: >-
+        https://s3.amazonaws.com/cloudformation-templates-sample/datasource.yaml
+      Parameters:
+        ParamValue: value
+        KeyName: mykey
+  datasetStack:
+    Type: 'AWS::CloudFormation::Stack'
+    DeletionPolicy: Retain
+    Properties:
+      TemplateURL: >-
+        https://s3.amazonaws.com/cloudformation-templates-sample/dataset.yaml
+      Parameters:
+        ParamValue: value
+        KeyName: mykey
\ No newline at end of file
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/nested_stack_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/nested_stack_CFN_skel.yaml
new file mode 100644
index 0000000..e50b063
--- /dev/null
+++ b/source/lambda/qs_assets_CFN_synthesizer/resources/nested_stack_CFN_skel.yaml
@@ -0,0 +1,8 @@
+Type: 'AWS::CloudFormation::Stack'
+DeletionPolicy: Retain
+Properties:
+  TemplateURL: >-
+    https://s3.amazonaws.com/cloudformation-templates-us-east-2/EC2ChooseAMI.template
+  Parameters:
+    ParamValue: value
+    KeyName: mykey
\ No newline at end of file
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/source_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/source_CFN_skel.yaml
new file mode 100644
index 0000000..d7cdb6d
--- /dev/null
+++ b/source/lambda/qs_assets_CFN_synthesizer/resources/source_CFN_skel.yaml
@@ -0,0 +1,12 @@
+AWSTemplateFormatVersion: 2010-09-09
+Description: Automated deployment of QuickSight Assets.
+Parameters:
+  DestAccountID:
+    Description: Account ID that needs to get access to the template, this is the current stage account (e.g. if setting parameters for the PRE stage it should be the AWS account used for Preproduction workloads).
+    Type: String
+  QSUser:
+    Description: QS Username in Account ID where to own the template that will be created.
+    Type: String
+  SrcQSAdminRegion:
+    Description: "Admin region for your QS source account where your users are hosted."
+    Type: String
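These skeleton files are consumed by loading them with pyyaml and grafting generated resources onto them, as replicate_dashboard_via_template does above. A minimal sketch (the resource body is illustrative):

    import yaml

    with open('resources/dest_CFN_skel.yaml', 'r') as file:
        dest_account_yaml = yaml.safe_load(file)
    dest_account_yaml['Resources'] = {}
    dest_account_yaml['Resources']['exampleDataSource'] = {  # illustrative resource
        'Type': 'AWS::QuickSight::DataSource',
        'Properties': {'DataSourceId': 'example-datasource-id'}
    }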
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/template_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/template_CFN_skel.yaml
deleted file mode 100644
index 1f4530d..0000000
--- a/source/lambda/qs_assets_CFN_synthesizer/resources/template_CFN_skel.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-AWSTemplateFormatVersion: 2010-09-09
-Description: Automated deployment of QuickSight Assets.
-Parameters:
-  DestAccountID:
-    Description: Account ID that needs to get access to the template (dest account)
-    Type: String
-  QSUser:
-    Description: QS Username in Account ID where to own the template that will be created
-    Type: String
-  SrcQSAdminRegion:
-    Description: "Admin region for your QS source account where your users are hosted"
-    Type: String
-Resources:
-  CICDQSTemplate:
-    Type: 'AWS::QuickSight::Template'
-    Properties:
-      TemplateId: ''
-      Name: ''
-      AwsAccountId:
-        Ref: 'AWS::AccountId'
-      SourceEntity:
-        SourceAnalysis:
-          Arn:
-            Fn::Sub: 'arn:aws:quicksight:${AWS::Region}:${AWS::AccountId}:analysis/{analysis_id}'
-          DataSetReferences:
-            - DataSetArn:
-                Fn::Sub: 'arn:aws:quicksight:${AWS::Region}:${AWS::AccountId}:dataset/{dataset_id}'
-              DataSetPlaceholder: ''
-
-      Permissions:
-        - Actions:
-            - quicksight:DescribeTemplate
-            - quicksight:UpdateTemplate
-            - quicksight:DeleteTemplate
-            - quicksight:UpdateTemplateAlias
-            - quicksight:DescribeTemplatePermissions
-            - quicksight:UpdateTemplatePermissions
-          Principal:
-            Fn::Sub: 'arn:aws:quicksight:${SrcQSAdminRegion}:${AWS::AccountId}:user/default/${QSUser}'
-        - Actions:
-            - quicksight:DescribeTemplate
-            - quicksight:DescribeTemplateAlias
-          Principal:
-            Fn::Sub: 'arn:aws:iam::${DestAccountID}:root'
-      VersionDescription: Initial version - Copied over from AWS account.
\ No newline at end of file
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/template_resource_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/template_resource_CFN_skel.yaml
new file mode 100644
index 0000000..c376e0f
--- /dev/null
+++ b/source/lambda/qs_assets_CFN_synthesizer/resources/template_resource_CFN_skel.yaml
@@ -0,0 +1,31 @@
+Type: 'AWS::QuickSight::Template'
+Properties:
+  TemplateId: ''
+  Name: ''
+  AwsAccountId:
+    Ref: 'AWS::AccountId'
+  SourceEntity:
+    SourceAnalysis:
+      Arn:
+        Fn::Sub: 'arn:aws:quicksight:${AWS::Region}:${AWS::AccountId}:analysis/{analysis_id}'
+      DataSetReferences:
+        - DataSetArn:
+            Fn::Sub: 'arn:aws:quicksight:${AWS::Region}:${AWS::AccountId}:dataset/{dataset_id}'
+          DataSetPlaceholder: ''
+
+  Permissions:
+    - Actions:
+        - quicksight:DescribeTemplate
+        - quicksight:UpdateTemplate
+        - quicksight:DeleteTemplate
+        - quicksight:UpdateTemplateAlias
+        - quicksight:DescribeTemplatePermissions
+        - quicksight:UpdateTemplatePermissions
+      Principal:
+        Fn::Sub: 'arn:aws:quicksight:${SrcQSAdminRegion}:${AWS::AccountId}:user/default/${QSUser}'
+    - Actions:
+        - quicksight:DescribeTemplate
+        - quicksight:DescribeTemplateAlias
+      Principal:
+        Fn::Sub: 'arn:aws:iam::${DestAccountID}:root'
+  VersionDescription: Initial version - Copied over from AWS account.
\ No newline at end of file
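Note that the Fn::Sub strings in this skeleton mix CloudFormation ${...} substitutions with Python-style {analysis_id}/{dataset_id} placeholders. One way the synthesizer could fill the latter without disturbing the former is plain string replacement before parsing; this is a sketch of the idea, not necessarily the exact mechanism used:

    import yaml

    with open('resources/template_resource_CFN_skel.yaml', 'r') as file:
        skel_text = file.read()
    # str.replace leaves the CloudFormation ${...} markers untouched
    skel_text = skel_text.replace('{analysis_id}', 'abc-123').replace('{dataset_id}', 'ds-456')
    template_resource = yaml.safe_load(skel_text)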
diff --git a/source/lambda/qs_assets_CFN_synthesizer/resources/theme_resource_CFN_skel.yaml b/source/lambda/qs_assets_CFN_synthesizer/resources/theme_resource_CFN_skel.yaml
new file mode 100644
index 0000000..7327dea
--- /dev/null
+++ b/source/lambda/qs_assets_CFN_synthesizer/resources/theme_resource_CFN_skel.yaml
@@ -0,0 +1,23 @@
+Type: AWS::QuickSight::Theme
+Properties:
+  AwsAccountId: ''
+  BaseThemeId: ''
+  Configuration:
+  Name: ''
+  Permissions:
+    - Principal:
+        Fn::Sub: 'arn:aws:quicksight:${DstQSAdminRegion}:${AWS::AccountId}:user/default/${QSUser}'
+      Actions:
+        - quicksight:UpdateThemeAlias
+        - quicksight:ListThemeVersions
+        - quicksight:DescribeThemeAlias
+        - quicksight:UpdateThemePermissions
+        - quicksight:DeleteThemeAlias
+        - quicksight:DeleteTheme
+        - quicksight:ListThemeAliases
+        - quicksight:DescribeTheme
+        - quicksight:CreateThemeAlias
+        - quicksight:UpdateTheme
+        - quicksight:DescribeThemePermissions
+  ThemeId: ''
+  VersionDescription: ''
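Finally, once deployed, an INITIALIZE or DEPLOY run of the synthesizer can be triggered manually with the MODE key described in the messages above; a sketch (the function name here is an assumption, use whatever name the stack assigned to the synthesizer lambda):

    import boto3
    import json

    lambda_client = boto3.client('lambda')
    response = lambda_client.invoke(FunctionName='QSAssetsCFNSynthesizer',  # assumed name
                                    Payload=json.dumps({'MODE': 'DEPLOY'}).encode())
    print(json.load(response['Payload']))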