From 87cd83f5c0982ef8e0b086a5ac16c531f5269ba1 Mon Sep 17 00:00:00 2001 From: Alejandro Leal Date: Sat, 13 May 2023 23:51:46 -0400 Subject: [PATCH 1/4] Several updates Several updates --- CONTRIBUTING.md | 4 ++-- .../nb-glb-psc-neg-sb-psc-ilbl7-hybrid-neg/README.md | 2 +- .../network-dashboard/deploy-cloud-function/variables.tf | 2 +- blueprints/cloud-operations/quota-monitoring/README.md | 2 +- .../gcp-workload-identity-provider/main.tf | 2 +- .../function/healthchecker/configuration.go | 2 +- .../vm-migration/host-target-projects/README.md | 2 +- .../vm-migration/host-target-projects/variables.tf | 2 +- .../vm-migration/host-target-sharedvpc/README.md | 2 +- .../vm-migration/host-target-sharedvpc/variables.tf | 2 +- .../cloud-operations/vm-migration/single-project/README.md | 2 +- .../vm-migration/single-project/variables.tf | 2 +- blueprints/data-solutions/bq-ml/demo/README.md | 2 +- blueprints/data-solutions/bq-ml/demo/bmql_pipeline.ipynb | 4 ++-- blueprints/data-solutions/cloudsql-multiregion/README.md | 4 ++-- .../data-platform-foundations/04-transformation.tf | 2 +- .../demo/dataflow-csv2bq/src/csv2bq.py | 2 +- blueprints/data-solutions/data-playground/README.md | 6 +++--- blueprints/data-solutions/data-playground/variables.tf | 2 +- blueprints/data-solutions/shielded-folder/README.md | 2 +- blueprints/data-solutions/shielded-folder/variables.tf | 4 ++-- blueprints/data-solutions/vertex-mlops/README.md | 2 +- blueprints/data-solutions/vertex-mlops/metadata.yaml | 2 +- blueprints/data-solutions/vertex-mlops/variables.tf | 2 +- blueprints/factories/bigquery-factory/README.md | 2 +- blueprints/gke/multitenant-fleet/README.md | 2 +- blueprints/gke/multitenant-fleet/variables.tf | 2 +- blueprints/networking/filtering-proxy-psc/squid.conf | 2 +- blueprints/networking/psc-hybrid/README.md | 2 +- blueprints/networking/psc-hybrid/variables.tf | 2 +- blueprints/serverless/cloud-run-corporate/README.md | 6 +++--- blueprints/third-party-solutions/README.md | 2 +- 
blueprints/third-party-solutions/openshift/README.md | 2 +- blueprints/third-party-solutions/openshift/tf/README.md | 2 +- blueprints/third-party-solutions/openshift/tf/variables.tf | 2 +- fast/stages/0-bootstrap/README.md | 2 +- fast/stages/2-networking-a-peering/README.md | 2 +- fast/stages/2-networking-b-vpn/README.md | 4 ++-- fast/stages/2-networking-c-nva/README.md | 2 +- fast/stages/2-networking-d-separate-envs/README.md | 2 +- fast/stages/2-networking-e-nva-bgp/README.md | 6 +++--- fast/stages/2-networking-e-nva-bgp/ncc.tf | 2 +- fast/stages/2-security/README.md | 4 ++-- fast/stages/2-security/variables.tf | 4 ++-- fast/stages/3-data-platform/dev/README.md | 6 +++--- fast/stages/3-gke-multitenant/dev/README.md | 4 ++-- fast/stages/3-gke-multitenant/dev/variables.tf | 2 +- modules/binauthz/README.md | 2 +- modules/cloud-config-container/README.md | 2 +- .../onprem/docker-images/strongswan/entrypoint.sh | 2 +- .../onprem/docker-images/strongswan/ipsec-vti.sh | 2 +- modules/cloud-config-container/squid/squid.conf | 2 +- modules/cloud-run/README.md | 2 +- modules/compute-vm/README.md | 4 ++-- modules/endpoints/README.md | 2 +- modules/endpoints/variables.tf | 2 +- modules/gke-cluster-standard/main.tf | 2 +- modules/gke-hub/README.md | 2 +- modules/gke-hub/variables.tf | 2 +- modules/net-glb/README.md | 2 +- modules/projects-data-source/README.md | 2 +- modules/projects-data-source/variables.tf | 2 +- tests/modules/cloud_config_container_mysql/test_plan.py | 2 +- tools/state_iam.py | 4 ++-- 64 files changed, 82 insertions(+), 82 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1885c1e7b8..28a7c5b64d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -981,7 +981,7 @@ tests: # run a test named `test-plan`, load the specified tfvars files # use the default inventory file of `test-plan.yaml` test-plan: - tfvars: # if ommited, we load test-plan.tfvars by default + tfvars: # if omitted, we load test-plan.tfvars by default - test-plan.tfvars - 
test-plan-extra.tfvars inventory: @@ -991,7 +991,7 @@ tests: # extra_files: # - ../plugin-x/*.tf - # You can ommit the tfvars and inventory sections and they will + # You can omit the tfvars and inventory sections and they will # default to the name of the test. The following two examples are equivalent: # # test-plan2: diff --git a/blueprints/apigee/network-patterns/nb-glb-psc-neg-sb-psc-ilbl7-hybrid-neg/README.md b/blueprints/apigee/network-patterns/nb-glb-psc-neg-sb-psc-ilbl7-hybrid-neg/README.md index 0ec240b0f2..1919c6571f 100644 --- a/blueprints/apigee/network-patterns/nb-glb-psc-neg-sb-psc-ilbl7-hybrid-neg/README.md +++ b/blueprints/apigee/network-patterns/nb-glb-psc-neg-sb-psc-ilbl7-hybrid-neg/README.md @@ -6,7 +6,7 @@ The architecture is the one depicted below. ![Diagram](diagram.png) -To emulate an service deployed on-premise, we have used a managed instance group of instances running Nginx exposed via a regional internalload balancer (L7). The service is accesible through VPN. +To emulate a service deployed on-premise, we have used a managed instance group of instances running Nginx exposed via a regional internal load balancer (L7). The service is accessible through VPN. ## Running the blueprint diff --git a/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/variables.tf b/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/variables.tf index 8cc64e1f2a..5793d8af66 100644 --- a/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/variables.tf +++ b/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/variables.tf @@ -46,7 +46,7 @@ variable "dashboard_json_path" { } variable "discovery_config" { - description = "Discovery configuration. Discovery root is the organization or a folder. 
If monitored folders and projects are empty, every project under the discovery root node will be monitored." type = object({ discovery_root = string monitored_folders = list(string) diff --git a/blueprints/cloud-operations/quota-monitoring/README.md b/blueprints/cloud-operations/quota-monitoring/README.md index 571ffb2b02..3d466f2f37 100644 --- a/blueprints/cloud-operations/quota-monitoring/README.md +++ b/blueprints/cloud-operations/quota-monitoring/README.md @@ -28,7 +28,7 @@ Labels are set with project id (which may differ from the monitoring workspace p GCP Metrics Explorer, usage, limit and utilization view sample -The solution can also create a basic monitoring alert policy, to demonstrate how to raise alerts when quotas utilization goes over a predefined threshold, to enable it, set variable `alert_create` to true and reapply main.tf after main.py has run at least one and quota monitoring metrics have been creaed. +The solution can also create a basic monitoring alert policy, to demonstrate how to raise alerts when quotas utilization goes over a predefined threshold, to enable it, set variable `alert_create` to true and reapply main.tf after main.py has run at least once and quota monitoring metrics have been created. 
## Running the blueprint diff --git a/blueprints/cloud-operations/terraform-cloud-dynamic-credentials/gcp-workload-identity-provider/main.tf b/blueprints/cloud-operations/terraform-cloud-dynamic-credentials/gcp-workload-identity-provider/main.tf index e4275350d5..acffcb95b6 100644 --- a/blueprints/cloud-operations/terraform-cloud-dynamic-credentials/gcp-workload-identity-provider/main.tf +++ b/blueprints/cloud-operations/terraform-cloud-dynamic-credentials/gcp-workload-identity-provider/main.tf @@ -81,7 +81,7 @@ module "sa-tfc" { iam = { # We allow only tokens generated by a specific TFC workspace impersonation of the service account, - # that way one identity pool can be used for a TFC Organization, but every workspace will be able to impersonate only a specifc SA + # that way one identity pool can be used for a TFC Organization, but every workspace will be able to impersonate only a specific SA "roles/iam.workloadIdentityUser" = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.tfc-pool.name}/attribute.terraform_workspace_id/${var.tfc_workspace_id}"] } diff --git a/blueprints/cloud-operations/unmanaged-instances-healthcheck/function/healthchecker/configuration.go b/blueprints/cloud-operations/unmanaged-instances-healthcheck/function/healthchecker/configuration.go index 8256230505..16a839c22c 100644 --- a/blueprints/cloud-operations/unmanaged-instances-healthcheck/function/healthchecker/configuration.go +++ b/blueprints/cloud-operations/unmanaged-instances-healthcheck/function/healthchecker/configuration.go @@ -41,7 +41,7 @@ func getEnv(key, fallback string) string { return fallback } -// GetConfiguration generates configration by reading ENV variables. +// GetConfiguration generates configuration by reading ENV variables. 
func GetConfiguration() (*Configuration, error) { timeout, err := time.ParseDuration(getEnv("TIMEOUT", "1000ms")) if err != nil { diff --git a/blueprints/cloud-operations/vm-migration/host-target-projects/README.md b/blueprints/cloud-operations/vm-migration/host-target-projects/README.md index caf7649a6d..92c35a2b5a 100644 --- a/blueprints/cloud-operations/vm-migration/host-target-projects/README.md +++ b/blueprints/cloud-operations/vm-migration/host-target-projects/README.md @@ -27,7 +27,7 @@ This sample creates\updates several distinct groups of resources: |---|---|:---:|:---:|:---:| | [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | list(string) | ✓ | | | [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | list(string) | ✓ | | -| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | list(string) | | [] | +| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | list(string) | | [] | | [project_create](variables.tf#L31) | Parameters for the creation of the new project to host the M4CE backend. | object({…}) | | null | | [project_name](variables.tf#L40) | Name of an existing project or of the new project assigned as M4CE host project. 
| string | | "m4ce-host-project-000" | diff --git a/blueprints/cloud-operations/vm-migration/host-target-projects/variables.tf b/blueprints/cloud-operations/vm-migration/host-target-projects/variables.tf index c210fa3159..b4a91ad9fa 100644 --- a/blueprints/cloud-operations/vm-migration/host-target-projects/variables.tf +++ b/blueprints/cloud-operations/vm-migration/host-target-projects/variables.tf @@ -23,7 +23,7 @@ variable "migration_target_projects" { } variable "migration_viewer_users" { - description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format." + description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format." type = list(string) default = [] } diff --git a/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/README.md b/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/README.md index fbabb2a8ac..5a1b3fbcde 100644 --- a/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/README.md +++ b/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/README.md @@ -29,7 +29,7 @@ This sample creates\update several distinct groups of resources: | [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | list(string) | ✓ | | | [migration_target_projects](variables.tf#L20) | List of target projects for m4ce workload migrations. | list(string) | ✓ | | | [sharedvpc_host_projects](variables.tf#L45) | List of host projects that share a VPC with the selected target projects. | list(string) | ✓ | | -| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. | list(string) | | [] | +| [migration_viewer_users](variables.tf#L25) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. 
| list(string) | | [] | | [project_create](variables.tf#L30) | Parameters for the creation of the new project to host the M4CE backend. | object({…}) | | null | | [project_name](variables.tf#L39) | Name of an existing project or of the new project assigned as M4CE host project. | string | | "m4ce-host-project-000" | diff --git a/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/variables.tf b/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/variables.tf index c01740dc47..6b94ae8872 100644 --- a/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/variables.tf +++ b/blueprints/cloud-operations/vm-migration/host-target-sharedvpc/variables.tf @@ -23,7 +23,7 @@ variable "migration_target_projects" { } variable "migration_viewer_users" { - description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format." + description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format." type = list(string) default = [] } diff --git a/blueprints/cloud-operations/vm-migration/single-project/README.md b/blueprints/cloud-operations/vm-migration/single-project/README.md index 2f3550ce70..0817a639e5 100644 --- a/blueprints/cloud-operations/vm-migration/single-project/README.md +++ b/blueprints/cloud-operations/vm-migration/single-project/README.md @@ -27,7 +27,7 @@ This sample creates several distinct groups of resources: | name | description | type | required | default | |---|---|:---:|:---:|:---:| | [migration_admin_users](variables.tf#L15) | List of users authorized to create a new M4CE sources and perform all other migration operations, in IAM format. | list(string) | ✓ | | -| [migration_viewer_users](variables.tf#L20) | List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format. 
| list(string) | | [] | +| [migration_viewer_users](variables.tf#L20) | List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format. | list(string) | | [] | | [project_create](variables.tf#L26) | Parameters for the creation of the new project to host the M4CE backend. | object({…}) | | null | | [project_name](variables.tf#L35) | Name of an existing project or of the new project assigned as M4CE host an target project. | string | | "m4ce-host-project-000" | | [vpc_config](variables.tf#L41) | Parameters to create a simple VPC on the M4CE project. | object({…}) | | {…} | diff --git a/blueprints/cloud-operations/vm-migration/single-project/variables.tf b/blueprints/cloud-operations/vm-migration/single-project/variables.tf index 3335254f23..967a739a7f 100644 --- a/blueprints/cloud-operations/vm-migration/single-project/variables.tf +++ b/blueprints/cloud-operations/vm-migration/single-project/variables.tf @@ -18,7 +18,7 @@ variable "migration_admin_users" { } variable "migration_viewer_users" { - description = "List of users authorized to retrive information about M4CE in the Google Cloud Console, in IAM format." + description = "List of users authorized to retrieve information about M4CE in the Google Cloud Console, in IAM format." type = list(string) default = [] } diff --git a/blueprints/data-solutions/bq-ml/demo/README.md b/blueprints/data-solutions/bq-ml/demo/README.md index 8ab748d8bd..f59d55fdfb 100644 --- a/blueprints/data-solutions/bq-ml/demo/README.md +++ b/blueprints/data-solutions/bq-ml/demo/README.md @@ -12,7 +12,7 @@ In this tutorial we will also see how to make explainable predictions, in order # Dataset -This tutorial uses a fictitious e-commerce dataset collecting programmatically generated data from the fictitious e-commerce store called The Look. The dataset is publicy available on BigQuery at this location `bigquery-public-data.thelook_ecommerce`. 
+This tutorial uses a fictitious e-commerce dataset collecting programmatically generated data from the fictitious e-commerce store called The Look. The dataset is publicly available on BigQuery at this location `bigquery-public-data.thelook_ecommerce`. # Goal diff --git a/blueprints/data-solutions/bq-ml/demo/bmql_pipeline.ipynb b/blueprints/data-solutions/bq-ml/demo/bmql_pipeline.ipynb index aa494da5e6..1acfe267f2 100644 --- a/blueprints/data-solutions/bq-ml/demo/bmql_pipeline.ipynb +++ b/blueprints/data-solutions/bq-ml/demo/bmql_pipeline.ipynb @@ -354,7 +354,7 @@ "outputs": [], "source": [ "# deploy the BigQuery ML model on Vertex Endpoint\n", - "# have a coffe - this step can take up 10/15 minutes to finish\n", + "# have a coffee - this step can take up 10/15 minutes to finish\n", "model.deploy(endpoint=endpoint, deployed_model_display_name='bqml-deployed-model')" ] }, @@ -436,7 +436,7 @@ "\n", "Thanks to this tutorial we were able to:\n", "- Define a re-usable Vertex AI pipeline to train and evaluate BQ ML models\n", - "- Use a Vertex AI Experiment to keep track of multiple trainings for the same model with different paramenters (in this case a different split for train/test data)\n", + "- Use a Vertex AI Experiment to keep track of multiple trainings for the same model with different parameters (in this case a different split for train/test data)\n", "- Deploy the preferred model on a Vertex AI managed Endpoint in order to serve the model for real-time use cases via API\n", "- Make batch prediction via Big Query and see what are the top 5 features which influenced the algorithm output" ] diff --git a/blueprints/data-solutions/cloudsql-multiregion/README.md b/blueprints/data-solutions/cloudsql-multiregion/README.md index 8b8e7e3e35..13b5be8a4e 100644 --- a/blueprints/data-solutions/cloudsql-multiregion/README.md +++ b/blueprints/data-solutions/cloudsql-multiregion/README.md @@ -60,7 +60,7 @@ Once you have the required information, head back to the cloud shell 
editor. Mak Configure the Terraform variables in your `terraform.tfvars` file. You need to specify at least the `project_id` and `prefix` variables. See [`terraform.tfvars.sample`](terraform.tfvars.sample) as starting point. -![Deploy ressources](images/image2.png) +![Deploy resources](images/image2.png) Run Terraform init: @@ -71,7 +71,7 @@ terraform apply The resource creation will take a few minutes, at the end this is the output you should expect for successful completion along with a list of the created resources: -![Ressources installed](images/image3.png) +![Resources installed](images/image3.png) ## Move to real use case consideration diff --git a/blueprints/data-solutions/data-platform-foundations/04-transformation.tf b/blueprints/data-solutions/data-platform-foundations/04-transformation.tf index 394adedf8a..dd599c978e 100644 --- a/blueprints/data-solutions/data-platform-foundations/04-transformation.tf +++ b/blueprints/data-solutions/data-platform-foundations/04-transformation.tf @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# tfdoc:file:description Trasformation project and VPC. +# tfdoc:file:description Transformation project and VPC. 
locals { iam_trf = { diff --git a/blueprints/data-solutions/data-platform-foundations/demo/dataflow-csv2bq/src/csv2bq.py b/blueprints/data-solutions/data-platform-foundations/demo/dataflow-csv2bq/src/csv2bq.py index 0f8ad12753..380a01cb43 100644 --- a/blueprints/data-solutions/data-platform-foundations/demo/dataflow-csv2bq/src/csv2bq.py +++ b/blueprints/data-solutions/data-platform-foundations/demo/dataflow-csv2bq/src/csv2bq.py @@ -22,7 +22,7 @@ class ParseRow(beam.DoFn): """ - Splits a given csv row by a seperator, validates fields and returns a dict + Splits a given csv row by a separator, validates fields and returns a dict structure compatible with the BigQuery transform """ diff --git a/blueprints/data-solutions/data-playground/README.md b/blueprints/data-solutions/data-playground/README.md index a2de4db963..5f534aa25e 100644 --- a/blueprints/data-solutions/data-playground/README.md +++ b/blueprints/data-solutions/data-playground/README.md @@ -22,7 +22,7 @@ As is often the case in real-world configurations, this blueprint accepts as inp If the network_config variable is not provided, one VPC will be created in each project that supports network resources (load, transformation and orchestration). -## Deploy your enviroment +## Deploy your environment We assume the identiy running the following steps has the following role: @@ -35,7 +35,7 @@ Run Terraform init: terraform init ``` -Configure the Terraform variable in your terraform.tfvars file. You need to spefify at least the following variables: +Configure the Terraform variable in your terraform.tfvars file. You need to specify at least the following variables: ``` prefix = "prefix" @@ -48,7 +48,7 @@ You can run now: terraform apply ``` -You can now connect to the Vertex AI notbook to perform your data analysy. +You can now connect to the Vertex AI notebook to perform your data analysis. 
## Variables diff --git a/blueprints/data-solutions/data-playground/variables.tf b/blueprints/data-solutions/data-playground/variables.tf index 3bd0ca65b1..682e1a6d61 100644 --- a/blueprints/data-solutions/data-playground/variables.tf +++ b/blueprints/data-solutions/data-playground/variables.tf @@ -58,7 +58,7 @@ variable "region" { default = "europe-west1" } -variable "service_encryption_keys" { # service encription key +variable "service_encryption_keys" { # service encryption key description = "Cloud KMS to use to encrypt different services. Key location should match service region." type = object({ bq = string diff --git a/blueprints/data-solutions/shielded-folder/README.md b/blueprints/data-solutions/shielded-folder/README.md index c4b471bdd3..dff46169c6 100644 --- a/blueprints/data-solutions/shielded-folder/README.md +++ b/blueprints/data-solutions/shielded-folder/README.md @@ -112,7 +112,7 @@ The Shielded Folder blueprint is meant to be executed by a Service Account (or a - `roles/resourcemanager.folderAdmin` - `roles/resourcemanager.projectCreator` -The shielded Folfer blueprint assumes [groups described](#user-groups) are created in your GCP organization. +The shielded Folder blueprint assumes [groups described](#user-groups) are created in your GCP organization. ### Variable configuration PIPPO diff --git a/blueprints/data-solutions/shielded-folder/variables.tf b/blueprints/data-solutions/shielded-folder/variables.tf index a9ecbb241b..f4ec8acb19 100644 --- a/blueprints/data-solutions/shielded-folder/variables.tf +++ b/blueprints/data-solutions/shielded-folder/variables.tf @@ -188,7 +188,7 @@ variable "vpc_sc_access_levels" { } variable "vpc_sc_egress_policies" { - description = "VPC SC egress policy defnitions." + description = "VPC SC egress policy definitions." 
type = map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") @@ -208,7 +208,7 @@ variable "vpc_sc_egress_policies" { } variable "vpc_sc_ingress_policies" { - description = "VPC SC ingress policy defnitions." + description = "VPC SC ingress policy definitions." type = map(object({ from = object({ access_levels = optional(list(string), []) diff --git a/blueprints/data-solutions/vertex-mlops/README.md b/blueprints/data-solutions/vertex-mlops/README.md index 4c918bbc27..e9cf4909f3 100644 --- a/blueprints/data-solutions/vertex-mlops/README.md +++ b/blueprints/data-solutions/vertex-mlops/README.md @@ -36,7 +36,7 @@ Assign roles relying on User groups is a way to decouple the final set of permis We use the following groups to control access to resources: -- *Data Scientits* (gcp-ml-ds@). They manage notebooks and create ML pipelines. +- *Data Scientist* (gcp-ml-ds@). They manage notebooks and create ML pipelines. - *ML Engineers* (gcp-ml-eng@). They manage the different Vertex resources. - *ML Viewer* (gcp-ml-eng@). Group with wiewer permission for the different resources. diff --git a/blueprints/data-solutions/vertex-mlops/metadata.yaml b/blueprints/data-solutions/vertex-mlops/metadata.yaml index d09c244738..bb12a5f93f 100644 --- a/blueprints/data-solutions/vertex-mlops/metadata.yaml +++ b/blueprints/data-solutions/vertex-mlops/metadata.yaml @@ -50,7 +50,7 @@ spec: interfaces: variables: - name: notebooks - description: Vertex AI workbenchs to be deployed. Service Account runtime/instances deployed. + description: Vertex AI workbenches to be deployed. Service Account runtime/instances deployed. 
type: |- map(object({ type = string diff --git a/blueprints/data-solutions/vertex-mlops/variables.tf b/blueprints/data-solutions/vertex-mlops/variables.tf index cc8200f9d4..d9e375dfb7 100644 --- a/blueprints/data-solutions/vertex-mlops/variables.tf +++ b/blueprints/data-solutions/vertex-mlops/variables.tf @@ -67,7 +67,7 @@ variable "network_config" { } variable "notebooks" { - description = "Vertex AI workbenchs to be deployed. Service Account runtime/instances deployed." + description = "Vertex AI workbenches to be deployed. Service Account runtime/instances deployed." type = map(object({ type = string machine_type = optional(string, "n1-standard-4") diff --git a/blueprints/factories/bigquery-factory/README.md b/blueprints/factories/bigquery-factory/README.md index 1e3015ed6d..baf2f6d269 100644 --- a/blueprints/factories/bigquery-factory/README.md +++ b/blueprints/factories/bigquery-factory/README.md @@ -10,7 +10,7 @@ You can create as many files as you like, the code will loop through it and crea ### Terraform code -In this section we show how to create tables and views from a file structure simlar to the one shown below. +In this section we show how to create tables and views from a file structure similar to the one shown below. ```bash bigquery │ diff --git a/blueprints/gke/multitenant-fleet/README.md b/blueprints/gke/multitenant-fleet/README.md index 0818479b4b..d36faa25ed 100644 --- a/blueprints/gke/multitenant-fleet/README.md +++ b/blueprints/gke/multitenant-fleet/README.md @@ -253,7 +253,7 @@ module "gke" { | [clusters](variables.tf#L22) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | | [fleet_configmanagement_clusters](variables.tf#L70) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. 
| map(list(string)) | | {} | | [fleet_configmanagement_templates](variables.tf#L77) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | -| [fleet_features](variables.tf#L112) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | +| [fleet_features](variables.tf#L112) | Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | | [fleet_workload_identity](variables.tf#L125) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | | [group_iam](variables.tf#L137) | Project-level IAM bindings for groups. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | | [iam](variables.tf#L144) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | diff --git a/blueprints/gke/multitenant-fleet/variables.tf b/blueprints/gke/multitenant-fleet/variables.tf index 13760606e3..37975b60b2 100644 --- a/blueprints/gke/multitenant-fleet/variables.tf +++ b/blueprints/gke/multitenant-fleet/variables.tf @@ -110,7 +110,7 @@ variable "fleet_configmanagement_templates" { } variable "fleet_features" { - description = "Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used." + description = "Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used." 
type = object({ appdevexperience = bool configmanagement = bool diff --git a/blueprints/networking/filtering-proxy-psc/squid.conf b/blueprints/networking/filtering-proxy-psc/squid.conf index 85483c254f..1d79645284 100644 --- a/blueprints/networking/filtering-proxy-psc/squid.conf +++ b/blueprints/networking/filtering-proxy-psc/squid.conf @@ -41,7 +41,7 @@ http_access deny !safe_ports # deny CONNECT if connection is not using ssl http_access deny CONNECT !ssl_ports -# deny acccess to cachemgr +# deny access to cachemgr http_access deny manager # deny access to localhost through the proxy diff --git a/blueprints/networking/psc-hybrid/README.md b/blueprints/networking/psc-hybrid/README.md index 579c9ff4cf..5e2c8ff5a4 100644 --- a/blueprints/networking/psc-hybrid/README.md +++ b/blueprints/networking/psc-hybrid/README.md @@ -43,7 +43,7 @@ Before applying this Terraform | [dest_ip_address](variables.tf#L17) | On-prem service destination IP address. | string | ✓ | | | [prefix](variables.tf#L28) | Prefix used for resource names. | string | ✓ | | | [producer](variables.tf#L37) | Producer configuration. | object({…}) | ✓ | | -| [project_id](variables.tf#L53) | When referncing existing projects, the id of the project where resources will be created. | string | ✓ | | +| [project_id](variables.tf#L53) | When referencing existing projects, the id of the project where resources will be created. | string | ✓ | | | [region](variables.tf#L58) | Region where resources will be created. | string | ✓ | | | [subnet_consumer](variables.tf#L63) | Consumer subnet CIDR. | string # CIDR | ✓ | | | [zone](variables.tf#L102) | Zone where resources will be created. 
| string | ✓ | | diff --git a/blueprints/networking/psc-hybrid/variables.tf b/blueprints/networking/psc-hybrid/variables.tf index d5d818a8d9..17e2f65a0b 100644 --- a/blueprints/networking/psc-hybrid/variables.tf +++ b/blueprints/networking/psc-hybrid/variables.tf @@ -51,7 +51,7 @@ variable "project_create" { } variable "project_id" { - description = "When referncing existing projects, the id of the project where resources will be created." + description = "When referencing existing projects, the id of the project where resources will be created." type = string } diff --git a/blueprints/serverless/cloud-run-corporate/README.md b/blueprints/serverless/cloud-run-corporate/README.md index 0cccb509a9..a91df33b43 100644 --- a/blueprints/serverless/cloud-run-corporate/README.md +++ b/blueprints/serverless/cloud-run-corporate/README.md @@ -122,11 +122,11 @@ Note the different PSC endpoints created in each project and the different IPs. #### Use case 3.2 -It is possible to block access from the Internet restoring `ingress_settigns` to `"internal"` but this will also block access from any other project. This feature is interesting, as will be shown in the next use case. +It is possible to block access from the Internet restoring `ingress_settings` to `"internal"` but this will also block access from any other project. This feature is interesting, as will be shown in the next use case.

-Simply omit `ingress_settigns` in `terraform.tfvars`: +Simply omit `ingress_settings` in `terraform.tfvars`: ```tfvars prj_main_id = "[your-main-project-id]" @@ -135,7 +135,7 @@ prj_prj1_id = "[your-project1-id]" #### Use case 3.3 -To allow access from other projects while keeping access from the Internet restricted, you need to add those projects to a VPC SC perimeter together with Cloud Run. Projects outisde the perimeter will be blocked. This way you can control which projects can gain access. +To allow access from other projects while keeping access from the Internet restricted, you need to add those projects to a VPC SC perimeter together with Cloud Run. Projects outside the perimeter will be blocked. This way you can control which projects can gain access.

diff --git a/blueprints/third-party-solutions/README.md b/blueprints/third-party-solutions/README.md index c7cbec7379..c81bc14430 100644 --- a/blueprints/third-party-solutions/README.md +++ b/blueprints/third-party-solutions/README.md @@ -6,7 +6,7 @@ The blueprints in this folder show how to automate installation of specific thir ### OpenShift cluster bootstrap on Shared VPC - This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks. + This [example](./openshift/) shows how to quickly bootstrap an OpenShift 4.7 cluster on GCP, using typical enterprise features like Shared VPC and CMEK for instance disks.
diff --git a/blueprints/third-party-solutions/openshift/README.md b/blueprints/third-party-solutions/openshift/README.md index b0be97ba0f..626df5f107 100644 --- a/blueprints/third-party-solutions/openshift/README.md +++ b/blueprints/third-party-solutions/openshift/README.md @@ -139,7 +139,7 @@ Variable configuration is best done in a `.tfvars` file, but can also be done di
disk_encryption_key
Set to null if you are not using CMEK keys for disk encryption. If you are using it, ensure the GCE robot account has permissions on the key.
fs_paths
-
Filesystem paths for the external dependencies. Home path expansion is supported. The config_dir path is where generated ignition files will be created. Ensure it's empty (incuding hidden files) before starting the installation process.
+
Filesystem paths for the external dependencies. Home path expansion is supported. The config_dir path is where generated ignition files will be created. Ensure it's empty (including hidden files) before starting the installation process.
host_project
If you don't need installing in different subnets, pass the same subnet names for the default, masters, and workers subnets.
install_config_params
diff --git a/blueprints/third-party-solutions/openshift/tf/README.md b/blueprints/third-party-solutions/openshift/tf/README.md index 190f0b42ee..b2fd0652f3 100644 --- a/blueprints/third-party-solutions/openshift/tf/README.md +++ b/blueprints/third-party-solutions/openshift/tf/README.md @@ -12,7 +12,7 @@ This example is a companion setup to the Python script in the parent folder, and | [fs_paths](variables.tf#L44) | Filesystem paths for commands and data, supports home path expansion. | object({…}) | ✓ | | | [host_project](variables.tf#L55) | Shared VPC project and network configuration. | object({…}) | ✓ | | | [service_project](variables.tf#L125) | Service project configuration. | object({…}) | ✓ | | -| [allowed_ranges](variables.tf#L17) | Ranges that can SSH to the boostrap VM and API endpoint. | list(any) | | ["10.0.0.0/8"] | +| [allowed_ranges](variables.tf#L17) | Ranges that can SSH to the bootstrap VM and API endpoint. | list(any) | | ["10.0.0.0/8"] | | [disk_encryption_key](variables.tf#L28) | Optional CMEK for disk encryption. | object({…}) | | null | | [install_config_params](variables.tf#L68) | OpenShift cluster configuration. | object({…}) | | {…} | | [post_bootstrap_config](variables.tf#L103) | Name of the service account for the machine operator. Removes bootstrap resources when set. | object({…}) | | null | diff --git a/blueprints/third-party-solutions/openshift/tf/variables.tf b/blueprints/third-party-solutions/openshift/tf/variables.tf index ee90bfef81..fe7ea97db2 100644 --- a/blueprints/third-party-solutions/openshift/tf/variables.tf +++ b/blueprints/third-party-solutions/openshift/tf/variables.tf @@ -15,7 +15,7 @@ */ variable "allowed_ranges" { - description = "Ranges that can SSH to the boostrap VM and API endpoint." + description = "Ranges that can SSH to the bootstrap VM and API endpoint." 
type = list(any) default = ["10.0.0.0/8"] } diff --git a/fast/stages/0-bootstrap/README.md b/fast/stages/0-bootstrap/README.md index f784651f6e..2c7a8657a6 100644 --- a/fast/stages/0-bootstrap/README.md +++ b/fast/stages/0-bootstrap/README.md @@ -249,7 +249,7 @@ This second set of files is disabled by default, you can enable it by setting th outputs_location = "~/fast-config" ``` -Once the variable is set, `apply` will generate and manage providers and variables files, including the initial one used for this stage after the first run. You can then link these files in the relevant stages, instead of manually transfering outputs from one stage, to Terraform variables in another. +Once the variable is set, `apply` will generate and manage providers and variables files, including the initial one used for this stage after the first run. You can then link these files in the relevant stages, instead of manually transferring outputs from one stage, to Terraform variables in another. Below is the outline of the output files generated by all stages, which is identical for both the GCS and local filesystem copies: diff --git a/fast/stages/2-networking-a-peering/README.md b/fast/stages/2-networking-a-peering/README.md index 2552691a84..861549d55e 100644 --- a/fast/stages/2-networking-a-peering/README.md +++ b/fast/stages/2-networking-a-peering/README.md @@ -276,7 +276,7 @@ terraform apply ### Post-deployment activities -- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible. +- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible. - On-prem routers should accept BGP sessions from their cloud peers. - On-prem DNS servers should have forward zones for GCP-managed ones. 
diff --git a/fast/stages/2-networking-b-vpn/README.md b/fast/stages/2-networking-b-vpn/README.md index b5b87cefc3..0d0b02ff72 100644 --- a/fast/stages/2-networking-b-vpn/README.md +++ b/fast/stages/2-networking-b-vpn/README.md @@ -290,7 +290,7 @@ terraform apply ### Post-deployment activities -- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible. +- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible. - On-prem routers should accept BGP sessions from their cloud peers. - On-prem DNS servers should have forward zones for GCP-managed ones. @@ -386,7 +386,7 @@ Copy `vpn-spoke-dev.tf` to `vpn-spoke-staging.tf` - replace `dev` with `staging` VPN configuration also controls BGP advertisements, which requires the following variable changes: - `router_configs` to configure the new routers (one per region) created for the `staging` VPC -- `vpn_onprem_configs` to configure the new advertisments to on-premises for the new CIDRs +- `vpn_onprem_configs` to configure the new advertisements to on-premises for the new CIDRs - `vpn_spoke_configs` to configure the new advertisements to `landing` for the new VPC - new keys (one per region) should be added, such as e.g. `staging-ew1` and `staging-ew4` DNS configurations are centralised in the `dns-*.tf` files. Spokes delegate DNS resolution to Landing through DNS peering, and optionally define a private zone (e.g. `dev.gcp.example.com`) which the landing peers to. To configure DNS for a new environment, copy one of the other environments DNS files [e.g. (dns-dev.tf)](dns-dev.tf) into a new `dns-*.tf` file suffixed with the environment name (e.g. `dns-staging.tf`), and update its content accordingly. 
Don't forget to add a peering zone from the landing to the newly created environment private zone. diff --git a/fast/stages/2-networking-c-nva/README.md b/fast/stages/2-networking-c-nva/README.md index 7ab05f5675..32471fd102 100644 --- a/fast/stages/2-networking-c-nva/README.md +++ b/fast/stages/2-networking-c-nva/README.md @@ -358,7 +358,7 @@ terraform apply ### Post-deployment activities -- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible. +- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible. - On-prem routers should accept BGP sessions from their cloud peers. - On-prem DNS servers should have forward zones for GCP-managed ones. diff --git a/fast/stages/2-networking-d-separate-envs/README.md b/fast/stages/2-networking-d-separate-envs/README.md index 3929db7ee2..7158a96442 100644 --- a/fast/stages/2-networking-d-separate-envs/README.md +++ b/fast/stages/2-networking-d-separate-envs/README.md @@ -238,7 +238,7 @@ terraform apply ### Post-deployment activities -- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible. +- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible. - On-prem routers should accept BGP sessions from their cloud peers. - On-prem DNS servers should have forward zones for GCP-managed ones. 
diff --git a/fast/stages/2-networking-e-nva-bgp/README.md b/fast/stages/2-networking-e-nva-bgp/README.md index 4fee973543..f13f703d16 100644 --- a/fast/stages/2-networking-e-nva-bgp/README.md +++ b/fast/stages/2-networking-e-nva-bgp/README.md @@ -84,7 +84,7 @@ The "landing zone" is divided into two VPC networks: ### NCC, NVAs and BGP sessions -The VPCs connect through two sets of sample NVA machines: one per region, each containing two instances. The appliances run [Contrainer-Optimized OS](https://cloud.google.com/container-optimized-os/docs) and a container with [FRRouting](https://frrouting.org/). +The VPCs connect through two sets of sample NVA machines: one per region, each containing two instances. The appliances run [Container-Optimized OS](https://cloud.google.com/container-optimized-os/docs) and a container with [FRRouting](https://frrouting.org/). We levarage NCC-RA to allow the NVAs to establish BGP sessions with Cloud Routers in the untrusted and in the trusted VPCs. This allows Cloud Routers to advertise routes to the NVAs, and the NVAs to announce routes to the Cloud Router, so it can program them in the VPC. @@ -92,7 +92,7 @@ Specifically, each NVA establishes two BGP sessions (for redundancy) with the th **Cloud Routers in the untrusted VPC advertise the default route (0.0.0.0/0) to the NVAs**. The NVAs advertise the route to the Cloud Routers in the trusted VPC. These dynamic routes are then imported through VPC peerings in the spokes. -**Cloud Routers in the trusted hub advertis to the NVAs** all the subnets of the trusted VPCs. This includes the regional subnets and the cross-regional subnets. The NVAs manipulate the route costs (MED) before advertising them to the Cloud Routers in the untrusted VPC. This is done to guarantee symmetric traffic paths (more [here](https://medium.com/google-cloud/gcp-routing-adventures-vol-2-enterprise-multi-regional-deployments-in-google-cloud-3968e9591d59)). 
+**Cloud Routers in the trusted hub advertise to the NVAs** all the subnets of the trusted VPCs. This includes the regional subnets and the cross-regional subnets. The NVAs manipulate the route costs (MED) before advertising them to the Cloud Routers in the untrusted VPC. This is done to guarantee symmetric traffic paths (more [here](https://medium.com/google-cloud/gcp-routing-adventures-vol-2-enterprise-multi-regional-deployments-in-google-cloud-3968e9591d59)). NVAs establish **extra BGP sessions with both cross-regional NVAs**. In this case, the NVAs advertise the regional trusted routes only. This allows cross-spoke (environment) traffic to remain also symmetric (more [here](https://medium.com/google-cloud/gcp-routing-adventures-vol-2-enterprise-multi-regional-deployments-in-google-cloud-3968e9591d59)). We set these routes to be exchanged at a lower cost than the one set for the other routes. @@ -380,7 +380,7 @@ terraform apply ### Post-deployment activities -- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recomment aggregating routes as much as possible. +- On-prem routers should be configured to advertise all relevant CIDRs to the GCP environments. To avoid hitting GCP quotas, we recommend aggregating routes as much as possible. - On-prem routers should accept BGP sessions from their cloud peers. - On-prem DNS servers should have forward zones for GCP-managed ones. diff --git a/fast/stages/2-networking-e-nva-bgp/ncc.tf b/fast/stages/2-networking-e-nva-bgp/ncc.tf index c9d53bfd0e..d0d8766a21 100644 --- a/fast/stages/2-networking-e-nva-bgp/ncc.tf +++ b/fast/stages/2-networking-e-nva-bgp/ncc.tf @@ -97,7 +97,7 @@ module "spokes-untrusted" { custom_advertise = { all_subnets = false - ip_ranges = { "0.0.0.0/0" = "Deafult route." } + ip_ranges = { "0.0.0.0/0" = "Default route." 
} } } diff --git a/fast/stages/2-security/README.md b/fast/stages/2-security/README.md index 66f726b5f1..e28aac7b5d 100644 --- a/fast/stages/2-security/README.md +++ b/fast/stages/2-security/README.md @@ -311,8 +311,8 @@ Some references that might be useful in setting up this stage: | [kms_keys](variables.tf#L73) | KMS keys to create, keyed by name. Null attributes will be interpolated with defaults. | map(object({…})) | | {} | | | [outputs_location](variables.tf#L94) | Path where providers, tfvars files, and lists for the following stages are written. Leave empty to disable. | string | | null | | | [vpc_sc_access_levels](variables.tf#L122) | VPC SC access level definitions. | map(object({…})) | | {} | | -| [vpc_sc_egress_policies](variables.tf#L151) | VPC SC egress policy defnitions. | map(object({…})) | | {} | | -| [vpc_sc_ingress_policies](variables.tf#L171) | VPC SC ingress policy defnitions. | map(object({…})) | | {} | | +| [vpc_sc_egress_policies](variables.tf#L151) | VPC SC egress policy definitions. | map(object({…})) | | {} | | +| [vpc_sc_ingress_policies](variables.tf#L171) | VPC SC ingress policy definitions. | map(object({…})) | | {} | | | [vpc_sc_perimeters](variables.tf#L192) | VPC SC regular perimeter definitions. | object({…}) | | {} | | ## Outputs diff --git a/fast/stages/2-security/variables.tf b/fast/stages/2-security/variables.tf index e14d637635..f798de7866 100644 --- a/fast/stages/2-security/variables.tf +++ b/fast/stages/2-security/variables.tf @@ -149,7 +149,7 @@ variable "vpc_sc_access_levels" { } variable "vpc_sc_egress_policies" { - description = "VPC SC egress policy defnitions." + description = "VPC SC egress policy definitions." type = map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") @@ -169,7 +169,7 @@ variable "vpc_sc_egress_policies" { } variable "vpc_sc_ingress_policies" { - description = "VPC SC ingress policy defnitions." + description = "VPC SC ingress policy definitions." 
type = map(object({ from = object({ access_levels = optional(list(string), []) diff --git a/fast/stages/3-data-platform/dev/README.md b/fast/stages/3-data-platform/dev/README.md index 07a8ad4661..17338155c8 100644 --- a/fast/stages/3-data-platform/dev/README.md +++ b/fast/stages/3-data-platform/dev/README.md @@ -62,15 +62,15 @@ The default configuration will implement 3 tags: Anything that is not tagged is available to all users who have access to the data warehouse. -You can configure your tags and roles associated by configuring the `data_catalog_tags` variable. We suggest useing the "[Best practices for using policy tags in BigQuery](https://cloud.google.com/bigquery/docs/best-practices-policy-tags)" article as a guide to designing your tags structure and access pattern. By default, no groups has access to tagged data. +You can configure your tags and roles associated by configuring the `data_catalog_tags` variable. We suggest using the "[Best practices for using policy tags in BigQuery](https://cloud.google.com/bigquery/docs/best-practices-policy-tags)" article as a guide to designing your tags structure and access pattern. By default, no groups has access to tagged data. ### VPC-SC -As is often the case in real-world configurations, [VPC-SC](https://cloud.google.com/vpc-service-controls) is needed to mitigate data exfiltration. VPC-SC can be configured from the [FAST security stage](../../2-security). This step is optional, but highly recomended, and depends on customer policies and security best practices. +As is often the case in real-world configurations, [VPC-SC](https://cloud.google.com/vpc-service-controls) is needed to mitigate data exfiltration. VPC-SC can be configured from the [FAST security stage](../../2-security). This step is optional, but highly recommended, and depends on customer policies and security best practices. 
To configure the use of VPC-SC on the data platform, you have to specify the data platform project numbers on the `vpc_sc_perimeter_projects.dev` variable on [FAST security stage](../../2-security#perimeter-resources). -In the case your Data Warehouse need to handle confidential data and you have the requirement to separate them deeply from other data and IAM is not enough, the suggested configuration is to keep the confidential project in a separate VPC-SC perimeter with the adequate ingress/egress rules needed for the load and tranformation service account. Below you can find an high level diagram describing the configuration. +In the case your Data Warehouse needs to handle confidential data and you have the requirement to separate them deeply from other data and IAM is not enough, the suggested configuration is to keep the confidential project in a separate VPC-SC perimeter with the adequate ingress/egress rules needed for the load and transformation service account. Below you can find a high level diagram describing the configuration.

Data Platform VPC-SC diagram diff --git a/fast/stages/3-gke-multitenant/dev/README.md b/fast/stages/3-gke-multitenant/dev/README.md index 2152003a15..35dc7372d1 100644 --- a/fast/stages/3-gke-multitenant/dev/README.md +++ b/fast/stages/3-gke-multitenant/dev/README.md @@ -131,7 +131,7 @@ This stage is designed with multi-tenancy in mind, and the expectation is that - the `cluster_default` variable allows defining common defaults for all clusters - the `clusters` variable is used to declare the actual GKE clusters and allows overriding defaults on a per-cluster basis -- the `nodepool_defaults` variable allows definining common defaults for all node pools +- the `nodepool_defaults` variable allows defining common defaults for all node pools - the `nodepools` variable is used to declare cluster node pools and allows overriding defaults on a per-cluster basis There are two additional variables that influence cluster configuration: `authenticator_security_group` to configure [Google Groups for RBAC](https://cloud.google.com/kubernetes-engine/docs/how-to/google-groups-rbac), `dns_domain` to configure [Cloud DNS for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/cloud-dns). @@ -171,7 +171,7 @@ Leave all these variables unset (or set to `null`) to disable fleet management. | [clusters](variables.tf#L42) | Clusters configuration. Refer to the gke-cluster module for type details. | map(object({…})) | | {} | | | [fleet_configmanagement_clusters](variables.tf#L90) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | | | [fleet_configmanagement_templates](variables.tf#L98) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | | -| [fleet_features](variables.tf#L133) | Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. 
| object({…}) | | null | | +| [fleet_features](variables.tf#L133) | Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used. | object({…}) | | null | | | [fleet_workload_identity](variables.tf#L146) | Use Fleet Workload Identity for clusters. Enables GKE Hub if set to true. | bool | | false | | | [group_iam](variables.tf#L161) | Project-level authoritative IAM bindings for groups in {GROUP_EMAIL => [ROLES]} format. Use group emails as keys, list of roles as values. | map(list(string)) | | {} | | | [iam](variables.tf#L176) | Project-level authoritative IAM bindings for users and service accounts in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | diff --git a/fast/stages/3-gke-multitenant/dev/variables.tf b/fast/stages/3-gke-multitenant/dev/variables.tf index a872b49d18..66936db6cf 100644 --- a/fast/stages/3-gke-multitenant/dev/variables.tf +++ b/fast/stages/3-gke-multitenant/dev/variables.tf @@ -131,7 +131,7 @@ variable "fleet_configmanagement_templates" { } variable "fleet_features" { - description = "Enable and configue fleet features. Set to null to disable GKE Hub if fleet workload identity is not used." + description = "Enable and configure fleet features. Set to null to disable GKE Hub if fleet workload identity is not used." 
type = object({ appdevexperience = bool configmanagement = bool diff --git a/modules/binauthz/README.md b/modules/binauthz/README.md index 960a7da31d..105a097a2f 100644 --- a/modules/binauthz/README.md +++ b/modules/binauthz/README.md @@ -4,7 +4,7 @@ This module simplifies the creation of a Binary Authorization policy, attestors ## Example -### Binary Athorization +### Binary Authorization ```hcl module "binauthz" { diff --git a/modules/cloud-config-container/README.md b/modules/cloud-config-container/README.md index 713ffa8831..2307a76d62 100644 --- a/modules/cloud-config-container/README.md +++ b/modules/cloud-config-container/README.md @@ -5,7 +5,7 @@ This set of modules creates specialized [cloud-config](https://cloud.google.com/ These modules are designed for several use cases: - to quickly prototype specialized services (eg MySQL access or HTTP serving) for prototyping infrastructure -- to emulate production services for perfomance testing +- to emulate production services for performance testing - to easily add glue components for services like DNS (eg to work around inbound/outbound forwarding limitations) - to implement cloud-native production deployments that leverage cloud-init for configuration management, without the need of a separate tool diff --git a/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/entrypoint.sh b/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/entrypoint.sh index bf596bc0f8..1d80c1bc50 100644 --- a/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/entrypoint.sh +++ b/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/entrypoint.sh @@ -24,7 +24,7 @@ _stop_ipsec() { } trap _stop_ipsec TERM -# Making the containter to work as a default gateway for LAN_NETWORKS +# Making the container to work as a default gateway for LAN_NETWORKS iptables -t nat -A POSTROUTING -s ${LAN_NETWORKS} -o ${VPN_DEVICE} -m policy --dir out --pol ipsec 
-j ACCEPT iptables -t nat -A POSTROUTING -s ${LAN_NETWORKS} -o ${VPN_DEVICE} -j MASQUERADE diff --git a/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/ipsec-vti.sh b/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/ipsec-vti.sh index 399ff629d0..5bff8bfc37 100644 --- a/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/ipsec-vti.sh +++ b/modules/cloud-config-container/__need_fixing/onprem/docker-images/strongswan/ipsec-vti.sh @@ -48,7 +48,7 @@ case "${PLUTO_VERB}" in # Enable loosy source validation, if possible. Otherwise disable validation. sudo /sbin/sysctl -w net.ipv4.conf.${VTI_IF}.rp_filter=2 || sysctl -w net.ipv4.conf.${VTI_IF}.rp_filter=0 - # If you would like to use VTI for policy-based you shoud take care of routing by yourselv, e.x. + # If you would like to use VTI for policy-based you should take care of routing by yourself, e.g. if [[ "${PLUTO_PEER_CLIENT}" != "0.0.0.0/0" ]]; then ${IP} r add "${PLUTO_PEER_CLIENT}" dev "${VTI_IF}" fi diff --git a/modules/cloud-config-container/squid/squid.conf b/modules/cloud-config-container/squid/squid.conf index fda94b3c4a..4800257e0e 100644 --- a/modules/cloud-config-container/squid/squid.conf +++ b/modules/cloud-config-container/squid/squid.conf @@ -33,7 +33,7 @@ http_access deny !safe_ports # deny CONNECT if connection is not using ssl http_access deny CONNECT !ssl_ports -# deny acccess to cachemgr +# deny access to cachemgr http_access deny manager # deny access to localhost through the proxy diff --git a/modules/cloud-run/README.md b/modules/cloud-run/README.md index d3aee2f604..cfd4d79282 100644 --- a/modules/cloud-run/README.md +++ b/modules/cloud-run/README.md @@ -101,7 +101,7 @@ module "cloud_run" { ### VPC Access Connector creation -If creation of a [VPC Access Connector](https://cloud.google.com/vpc/docs/serverless-vpc-access) is required, use the `vpc_connector_create` variable which also support optional attribtues for 
number of instances, machine type, and throughput (not shown here). The annotation to use the connector will be added automatically. +If creation of a [VPC Access Connector](https://cloud.google.com/vpc/docs/serverless-vpc-access) is required, use the `vpc_connector_create` variable which also support optional attributes for number of instances, machine type, and throughput (not shown here). The annotation to use the connector will be added automatically. ```hcl module "cloud_run" { diff --git a/modules/compute-vm/README.md b/modules/compute-vm/README.md index 1d48dd51b9..bae1bf104f 100644 --- a/modules/compute-vm/README.md +++ b/modules/compute-vm/README.md @@ -49,7 +49,7 @@ module "simple-vm-example" { VM service accounts can be managed in three different ways: -- You can let the module create a service account for you by settting `service_account_create = true` +- You can let the module create a service account for you by setting `service_account_create = true` - You can use an existing service account by setting `service_account_create = false` (the default value) and passing the full email address of the service account to the `service_account` variable. This is useful, for example, if you want to reuse the service account from another previously created instance, or if you want to create the service account manually with the `iam-service-account` module. In this case, you probably also want to set `service_account_scopes` to `cloud-platform`. - Lastly, you can use the default compute service account by setting `service_account_crate = false`. Please note that using the default compute service account is not recommended. 
@@ -204,7 +204,7 @@ module "vm-disk-options-example" { #### Internal and external IPs -By default VNs are create with an automatically assigned IP addresses, but you can change it through the `addreses` and `nat` attributes of the `network_interfaces` variable: +By default VMs are created with automatically assigned IP addresses, but you can change it through the `addresses` and `nat` attributes of the `network_interfaces` variable: ```hcl module "vm-internal-ip" { diff --git a/modules/endpoints/README.md b/modules/endpoints/README.md index e83954f135..2b687966e2 100644 --- a/modules/endpoints/README.md +++ b/modules/endpoints/README.md @@ -40,7 +40,7 @@ host: "echo-api.endpoints.YOUR-PROJECT-ID.cloud.goog" |---|---|:---:|:---:|:---:| | [openapi_config](variables.tf#L32) | The configuration for an OpenAPI endopoint. Either this or grpc_config must be specified. | object({…}) | ✓ | | | [service_name](variables.tf#L45) | The name of the service. Usually of the form '$apiname.endpoints.$projectid.cloud.goog'. | string | ✓ | | -| [grpc_config](variables.tf#L17) | The configuration for a gRPC enpoint. Either this or openapi_config must be specified. | object({…}) | | null | +| [grpc_config](variables.tf#L17) | The configuration for a gRPC endpoint. Either this or openapi_config must be specified. | object({…}) | | null | | [iam](variables.tf#L26) | IAM bindings for topic in {ROLE => [MEMBERS]} format. | map(list(string)) | | {} | | [project_id](variables.tf#L39) | The project ID that the service belongs to. | string | | null | diff --git a/modules/endpoints/variables.tf b/modules/endpoints/variables.tf index f590670ecf..ffd621ec6c 100644 --- a/modules/endpoints/variables.tf +++ b/modules/endpoints/variables.tf @@ -15,7 +15,7 @@ */ variable "grpc_config" { - description = "The configuration for a gRPC enpoint. Either this or openapi_config must be specified." + description = "The configuration for a gRPC endpoint. Either this or openapi_config must be specified." 
type = object({ yaml_path = string protoc_output_path = string diff --git a/modules/gke-cluster-standard/main.tf b/modules/gke-cluster-standard/main.tf index 70aed87b25..dedd705ba8 100644 --- a/modules/gke-cluster-standard/main.tf +++ b/modules/gke-cluster-standard/main.tf @@ -266,7 +266,7 @@ resource "google_container_cluster" "cluster" { } } - # dataplane v2 has bult-in network policies + # dataplane v2 has built-in network policies dynamic "network_policy" { for_each = ( var.enable_addons.network_policy && !var.enable_features.dataplane_v2 diff --git a/modules/gke-hub/README.md b/modules/gke-hub/README.md index 8f801f934d..d9d14c6269 100644 --- a/modules/gke-hub/README.md +++ b/modules/gke-hub/README.md @@ -326,7 +326,7 @@ module "hub" { | [clusters](variables.tf#L17) | Clusters members of this GKE Hub in name => id format. | map(string) | | {} | | [configmanagement_clusters](variables.tf#L24) | Config management features enabled on specific sets of member clusters, in config name => [cluster name] format. | map(list(string)) | | {} | | [configmanagement_templates](variables.tf#L31) | Sets of config management configurations that can be applied to member clusters, in config name => {options} format. | map(object({…})) | | {} | -| [features](variables.tf#L66) | Enable and configue fleet features. | object({…}) | | {…} | +| [features](variables.tf#L66) | Enable and configure fleet features. | object({…}) | | {…} | | [workload_identity_clusters](variables.tf#L92) | Clusters that will use Fleet Workload Identity. | list(string) | | [] | ## Outputs diff --git a/modules/gke-hub/variables.tf b/modules/gke-hub/variables.tf index 25e3d21d53..20641fe68c 100644 --- a/modules/gke-hub/variables.tf +++ b/modules/gke-hub/variables.tf @@ -64,7 +64,7 @@ variable "configmanagement_templates" { } variable "features" { - description = "Enable and configue fleet features." + description = "Enable and configure fleet features." 
type = object({ appdevexperience = optional(bool, false) configmanagement = optional(bool, false) diff --git a/modules/net-glb/README.md b/modules/net-glb/README.md index 74237a608e..1b1625f0fd 100644 --- a/modules/net-glb/README.md +++ b/modules/net-glb/README.md @@ -2,7 +2,7 @@ This module allows managing Global HTTP/HTTPS Classic Load Balancers (GLBs). It's designed to expose the full configuration of the underlying resources, and to facilitate common usage patterns by providing sensible defaults, and optionally managing prerequisite resources like health checks, instance groups, etc. -Due to the complexity of the underlying resources, changes to the configuration that involve recreation of resources are best applied in stages, starting by disabling the configuration in the urlmap that references the resources that neeed recreation, then doing the same for the backend service, etc. +Due to the complexity of the underlying resources, changes to the configuration that involve recreation of resources are best applied in stages, starting by disabling the configuration in the urlmap that references the resources that need recreation, then doing the same for the backend service, etc. ## Examples diff --git a/modules/projects-data-source/README.md b/modules/projects-data-source/README.md index 5d35f1ab0c..25402a04e1 100644 --- a/modules/projects-data-source/README.md +++ b/modules/projects-data-source/README.md @@ -82,7 +82,7 @@ output "filtered-projects" { | [parent](variables.tf#L55) | Parent folder or organization in 'folders/folder_id' or 'organizations/org_id' format. | string | ✓ | | | [ignore_folders](variables.tf#L17) | A list of folder IDs or numbers to be excluded from the output, all the subfolders and projects are exluded from the output regardless of the include_projects variable. | list(string) | | [] | | [ignore_projects](variables.tf#L28) | A list of project IDs, numbers or prefixes to exclude matching projects from the module output. 
| list(string) | | [] | -| [include_projects](variables.tf#L41) | A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wilcard entries. | list(string) | | [] | +| [include_projects](variables.tf#L41) | A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wildcard entries. | list(string) | | [] | | [query](variables.tf#L64) | A string query as defined in the [Query Syntax](https://cloud.google.com/asset-inventory/docs/query-syntax). | string | | "state:ACTIVE" | ## Outputs diff --git a/modules/projects-data-source/variables.tf b/modules/projects-data-source/variables.tf index 9fef35ab6c..c774784c85 100644 --- a/modules/projects-data-source/variables.tf +++ b/modules/projects-data-source/variables.tf @@ -39,7 +39,7 @@ variable "ignore_projects" { } variable "include_projects" { - description = "A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wilcard entries." + description = "A list of project IDs/numbers to include to the output if some of them are excluded by `ignore_projects` wildcard entries." type = list(string) default = [] # example excluding all the projects starting with "prf-" except "prd-123457" diff --git a/tests/modules/cloud_config_container_mysql/test_plan.py b/tests/modules/cloud_config_container_mysql/test_plan.py index 54d90a00dd..0b81daa809 100644 --- a/tests/modules/cloud_config_container_mysql/test_plan.py +++ b/tests/modules/cloud_config_container_mysql/test_plan.py @@ -17,7 +17,7 @@ def test_defaults(plan_summary): - "Test defalt configuration." + "Test default configuration." 
# _, output = apply_runner(mysql_password='foo') summary = plan_summary('modules/cloud-config-container/mysql/', mysql_password='foo') diff --git a/tools/state_iam.py b/tools/state_iam.py index 1ea887f8de..00c549a67c 100755 --- a/tools/state_iam.py +++ b/tools/state_iam.py @@ -75,10 +75,10 @@ def get_bindings(resources, prefix=None, folders=None): # Handle Cloud Services Service Account if member_domain == 'cloudservices.gserviceaccount.com': member_id = "PROJECT_CLOUD_SERVICES" - # Handle Cloud Service Identity Service Acocunt + # Handle Cloud Service Identity Service Account if re.match("^service-\d{8}", member_id): member_id = "SERVICE_IDENTITY_" + member_domain.split(".", 1)[0] - # Handle BQ Cloud Service Identity Service Acocunt + # Handle BQ Cloud Service Identity Service Account if re.match("^bq-\d{8}", member_id): member_id = "IDENTITY_" + member_domain.split(".", 1)[0] resource_type_output = "Service Identity - " + resource_type From a2a2942e2ce6ec5247524a5e83b09d377d0dad13 Mon Sep 17 00:00:00 2001 From: Alejandro Leal Date: Mon, 15 May 2023 14:50:47 -0400 Subject: [PATCH 2/4] Update README.md --- .../network-dashboard/deploy-cloud-function/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/README.md b/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/README.md index 72dff9b0b5..acee13ee8d 100644 --- a/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/README.md +++ b/blueprints/cloud-operations/network-dashboard/deploy-cloud-function/README.md @@ -14,7 +14,7 @@ A few configuration values for the function which are relevant to this example c ## Discovery configuration -Discovery configuration is done via the `discovery_config` variable, which mimicks the set of options available when running the discovery tool in cli mode. 
Pay particular care in defining the right top-level scope via the `discovery_root` attribute, as this is the root of the hierarchy used to discover Compute resources and it needs to include the individual folders and projects that needs to be monitored, which are defined via the `monitored_folders` and `monitored_projects` attributes. +Discovery configuration is done via the `discovery_config` variable, which mimics the set of options available when running the discovery tool in cli mode. Pay particular care in defining the right top-level scope via the `discovery_root` attribute, as this is the root of the hierarchy used to discover Compute resources and it needs to include the individual folders and projects that needs to be monitored, which are defined via the `monitored_folders` and `monitored_projects` attributes. The following schematic diagram of a resource hierarchy illustrates the interplay between root scope and monitored resources. The root scope is set to the top-level red folder and completely encloses every resource that needs to be monitored. The blue folder and project are set as monitored defining the actual perimeter used to discover resources. Note that setting the root scope to the blue folder would have resulted in the rightmost project being excluded. @@ -64,7 +64,7 @@ dashboard_json_path = "../dashboards/quotas-utilization.json" | name | description | type | required | default | |---|---|:---:|:---:|:---:| -| [discovery_config](variables.tf#L48) | Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empy, every project under the discovery root node will be monitored. | object({…}) | ✓ | | +| [discovery_config](variables.tf#L48) | Discovery configuration. Discovery root is the organization or a folder. If monitored folders and projects are empty, every project under the discovery root node will be monitored. 
| object({…}) | ✓ | | | [project_id](variables.tf#L100) | Project id where the Cloud Function will be deployed. | string | ✓ | | | [bundle_path](variables.tf#L17) | Path used to write the intermediate Cloud Function code bundle. | string | | "./bundle.zip" | | [cloud_function_config](variables.tf#L23) | Optional Cloud Function configuration. | object({…}) | | {} | From 65e6ef98faef951165c529a9bf2ecfd6182f7da5 Mon Sep 17 00:00:00 2001 From: Alejandro Leal Date: Mon, 15 May 2023 16:34:18 -0400 Subject: [PATCH 3/4] Reverted README.md to commit 85407109c862f9ca5bb46e04bb10876bd1e04988 --- blueprints/data-solutions/shielded-folder/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blueprints/data-solutions/shielded-folder/README.md b/blueprints/data-solutions/shielded-folder/README.md index dff46169c6..c4b471bdd3 100644 --- a/blueprints/data-solutions/shielded-folder/README.md +++ b/blueprints/data-solutions/shielded-folder/README.md @@ -112,7 +112,7 @@ The Shielded Folder blueprint is meant to be executed by a Service Account (or a - `roles/resourcemanager.folderAdmin` - `roles/resourcemanager.projectCreator` -The shielded Folder blueprint assumes [groups described](#user-groups) are created in your GCP organization. +The shielded Folfer blueprint assumes [groups described](#user-groups) are created in your GCP organization. 
### Variable configuration From 11f7f3efed49762cb648e5a1e6c071793a785641 Mon Sep 17 00:00:00 2001 From: Alejandro Leal Date: Mon, 15 May 2023 16:45:13 -0400 Subject: [PATCH 4/4] Reverted variables.tf metadata.yaml README.md to commit 85407109c862f9ca5bb46e04bb10876bd1e04988 --- blueprints/data-solutions/shielded-folder/variables.tf | 4 ++-- blueprints/data-solutions/vertex-mlops/README.md | 2 +- blueprints/data-solutions/vertex-mlops/metadata.yaml | 2 +- blueprints/data-solutions/vertex-mlops/variables.tf | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/blueprints/data-solutions/shielded-folder/variables.tf b/blueprints/data-solutions/shielded-folder/variables.tf index f4ec8acb19..a9ecbb241b 100644 --- a/blueprints/data-solutions/shielded-folder/variables.tf +++ b/blueprints/data-solutions/shielded-folder/variables.tf @@ -188,7 +188,7 @@ variable "vpc_sc_access_levels" { } variable "vpc_sc_egress_policies" { - description = "VPC SC egress policy definitions." + description = "VPC SC egress policy defnitions." type = map(object({ from = object({ identity_type = optional(string, "ANY_IDENTITY") @@ -208,7 +208,7 @@ variable "vpc_sc_egress_policies" { } variable "vpc_sc_ingress_policies" { - description = "VPC SC ingress policy definitions." + description = "VPC SC ingress policy defnitions." type = map(object({ from = object({ access_levels = optional(list(string), []) diff --git a/blueprints/data-solutions/vertex-mlops/README.md b/blueprints/data-solutions/vertex-mlops/README.md index e9cf4909f3..4c918bbc27 100644 --- a/blueprints/data-solutions/vertex-mlops/README.md +++ b/blueprints/data-solutions/vertex-mlops/README.md @@ -36,7 +36,7 @@ Assign roles relying on User groups is a way to decouple the final set of permis We use the following groups to control access to resources: -- *Data Scientist* (gcp-ml-ds@). They manage notebooks and create ML pipelines. +- *Data Scientits* (gcp-ml-ds@). They manage notebooks and create ML pipelines. 
- *ML Engineers* (gcp-ml-eng@). They manage the different Vertex resources. - *ML Viewer* (gcp-ml-eng@). Group with wiewer permission for the different resources. diff --git a/blueprints/data-solutions/vertex-mlops/metadata.yaml b/blueprints/data-solutions/vertex-mlops/metadata.yaml index bb12a5f93f..d09c244738 100644 --- a/blueprints/data-solutions/vertex-mlops/metadata.yaml +++ b/blueprints/data-solutions/vertex-mlops/metadata.yaml @@ -50,7 +50,7 @@ spec: interfaces: variables: - name: notebooks - description: Vertex AI workbenches to be deployed. Service Account runtime/instances deployed. + description: Vertex AI workbenchs to be deployed. Service Account runtime/instances deployed. type: |- map(object({ type = string diff --git a/blueprints/data-solutions/vertex-mlops/variables.tf b/blueprints/data-solutions/vertex-mlops/variables.tf index d9e375dfb7..cc8200f9d4 100644 --- a/blueprints/data-solutions/vertex-mlops/variables.tf +++ b/blueprints/data-solutions/vertex-mlops/variables.tf @@ -67,7 +67,7 @@ variable "network_config" { } variable "notebooks" { - description = "Vertex AI workbenches to be deployed. Service Account runtime/instances deployed." + description = "Vertex AI workbenchs to be deployed. Service Account runtime/instances deployed." type = map(object({ type = string machine_type = optional(string, "n1-standard-4")