diff --git a/fast/stages/0-bootstrap/README.md b/fast/stages/0-bootstrap/README.md
index 14e796e0fc..5643f3b158 100644
--- a/fast/stages/0-bootstrap/README.md
+++ b/fast/stages/0-bootstrap/README.md
@@ -165,7 +165,7 @@ What is implemented here is a fairly common convention, composed of tokens order
 - a context identifier (e.g. `core` or `kms`)
 - an arbitrary identifier used to distinguish similar resources (e.g. `0`, `1`)
 
-> [!WARNING] 
+> [!WARNING]
 > When using tenant factory, a tenant prefix will be automatically generated as `{prefix}-{tenant-shortname}`. The maximum length of such prefix must be 11 characters or less, which means that the longer org-level prefix you use, the less chars you'll have available for the `tenant-shortname`.
 
 Tokens are joined by a `-` character, making it easy to separate the individual tokens visually, and to programmatically split them in billing exports to derive initial high-level groupings for cost attribution.
@@ -367,8 +367,8 @@ Below is the outline of the output files generated by all stages, which is ident
 │   ├── 1-resman-providers.tf
 │   ├── 2-networking-providers.tf
 │   ├── 2-security-providers.tf
-│   ├── 3-project-factory-dev-providers.tf
-│   ├── 3-project-factory-prod-providers.tf
+│   ├── 2-project-factory-dev-providers.tf
+│   ├── 2-project-factory-prod-providers.tf
 │   └── 9-sandbox-providers.tf
 └── tfvars
 │   ├── 0-bootstrap.auto.tfvars.json
diff --git a/fast/stages/1-resman/outputs.tf b/fast/stages/1-resman/outputs.tf
index f640c33925..fedc572ae5 100644
--- a/fast/stages/1-resman/outputs.tf
+++ b/fast/stages/1-resman/outputs.tf
@@ -111,8 +111,8 @@ locals {
         plan  = try(module.branch-pf-r-sa-cicd[0].email, null)
       }
       tf_providers_files = {
-        apply = "3-project-factory-providers.tf"
-        plan  = "3-project-factory-r-providers.tf"
+        apply = "2-project-factory-providers.tf"
+        plan  = "2-project-factory-r-providers.tf"
       }
       tf_var_files = local.cicd_workflow_var_files.stage_3
     }
@@ -122,8 +122,8 @@ locals {
         plan  = try(module.branch-pf-dev-r-sa-cicd[0].email, null)
       }
       tf_providers_files = {
-        apply = "3-project-factory-dev-providers.tf"
-        plan  = "3-project-factory-dev-r-providers.tf"
+        apply = "2-project-factory-dev-providers.tf"
+        plan  = "2-project-factory-dev-r-providers.tf"
       }
       tf_var_files = local.cicd_workflow_var_files.stage_3
     }
@@ -133,8 +133,8 @@ locals {
         plan  = try(module.branch-pf-prod-r-sa-cicd[0].email, null)
       }
       tf_providers_files = {
-        apply = "3-project-factory-prod-providers.tf"
-        plan  = "3-project-factory-prod-r-providers.tf"
+        apply = "2-project-factory-prod-providers.tf"
+        plan  = "2-project-factory-prod-r-providers.tf"
       }
       tf_var_files = local.cicd_workflow_var_files.stage_3
     }
diff --git a/fast/stages/CLEANUP.md b/fast/stages/CLEANUP.md
index f00580753a..a477e2a47a 100644
--- a/fast/stages/CLEANUP.md
+++ b/fast/stages/CLEANUP.md
@@ -4,13 +4,6 @@ If you want to destroy a previous FAST deployment in your organization, follow t
 
 Destruction must be done in reverse order, from stage 3 to stage 0
 
-## Stage 3 (Project Factory)
-
-```bash
-cd $FAST_PWD/3-project-factory/dev/
-terraform destroy
-```
-
 ## Stage 3 (GKE)
 
 Terraform refuses to delete non-empty GCS buckets and BigQuery datasets, so they need to be removed manually from the state.
@@ -19,8 +12,8 @@ Terraform refuses to delete non-empty GCS buckets and BigQuery datasets, so they
 cd $FAST_PWD/3-gke-multitenant/dev/
 
 # remove BQ dataset manually
-for x in $(terraform state list | grep google_bigquery_dataset); do 
-  terraform state rm "$x"; 
+for x in $(terraform state list | grep google_bigquery_dataset); do
+  terraform state rm "$x";
 done
 
 terraform destroy
@@ -34,17 +27,24 @@ Terraform refuses to delete non-empty GCS buckets and BigQuery datasets, so they
 cd $FAST_PWD/3-data-platform/dev/
 
 # remove GCS buckets and BQ dataset manually. Projects will be destroyed anyway
-for x in $(terraform state list | grep google_storage_bucket.bucket); do 
-  terraform state rm "$x"; 
+for x in $(terraform state list | grep google_storage_bucket.bucket); do
+  terraform state rm "$x";
 done
 
-for x in $(terraform state list | grep google_bigquery_dataset); do 
-  terraform state rm "$x"; 
+for x in $(terraform state list | grep google_bigquery_dataset); do
+  terraform state rm "$x";
 done
 
 terraform destroy
 ```
 
+## Stage 2 (Project Factory)
+
+```bash
+cd $FAST_PWD/2-project-factory/
+terraform destroy
+```
+
 ## Stage 2 (Security)
 
 ```bash
@@ -69,7 +69,7 @@ Stage 1 is a little more complicated because of the GCS buckets containing your
 cd $FAST_PWD/1-resman/
 
 # remove buckets from state since terraform refuses to delete them
-for x in $(terraform state list | grep google_storage_bucket.bucket); do 
+for x in $(terraform state list | grep google_storage_bucket.bucket); do
   terraform state rm "$x"
 done
 
@@ -89,12 +89,12 @@ export FAST_BU=$(gcloud config list --format 'value(core.account)')
 terraform apply -var bootstrap_user=$FAST_BU
 
 # remove GCS buckets and BQ dataset manually. Projects will be destroyed anyway
-for x in $(terraform state list | grep google_storage_bucket.bucket); do 
-  terraform state rm "$x"; 
+for x in $(terraform state list | grep google_storage_bucket.bucket); do
+  terraform state rm "$x";
 done
 
-for x in $(terraform state list | grep google_bigquery_dataset); do 
-  terraform state rm "$x"; 
+for x in $(terraform state list | grep google_bigquery_dataset); do
+  terraform state rm "$x";
 done
 
 ## remove the providers file and migrate state
@@ -110,7 +110,7 @@ When the destroy fails, continue with the steps below. Again, make sure your use
 
 ```bash
 # Add the Organization Admin role to $BU_USER in the GCP Console
-# then execute the command below to grant yourself the permissions needed 
+# then execute the command below to grant yourself the permissions needed
 # to finish the destruction
 export FAST_DESTROY_ROLES="roles/resourcemanager.projectDeleter \
   roles/owner roles/resourcemanager.organizationAdmin"
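Note: every cleanup loop this patch touches follows the same list-grep-rm shape. A minimal Bash sketch of that pattern as a reusable helper (hypothetical, not part of the patch; assumes `terraform` is on `PATH` and the current directory is the stage being destroyed):

```bash
#!/usr/bin/env bash
# Remove every state entry whose address matches a pattern, so the
# subsequent `terraform destroy` skips those resources (e.g. non-empty
# GCS buckets and BigQuery datasets, which Terraform refuses to delete).
state_rm_matching() {
  local pattern="$1"
  for x in $(terraform state list | grep "$pattern"); do
    terraform state rm "$x"
  done
}

# Usage, equivalent to the loops in CLEANUP.md:
state_rm_matching google_storage_bucket.bucket
state_rm_matching google_bigquery_dataset
```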