From 43734bed6281f1141d6058ea829481125305977d Mon Sep 17 00:00:00 2001 From: Andrew Peabody Date: Wed, 18 Dec 2024 19:27:35 +0000 Subject: [PATCH] final work --- build/int.cloudbuild.yaml | 286 +++++++++++++++++- test/fixtures/node_pool/outputs.tf | 4 + test/integration/go.mod | 4 +- test/integration/node_pool/node_pool_test.go | 125 +------- .../node_pool/testdata/TestNodePool.json | 12 +- .../safer_cluster_iap_bastion_test.go | 10 +- test/integration/testutils/utils.go | 46 +++ 7 files changed, 355 insertions(+), 132 deletions(-) diff --git a/build/int.cloudbuild.yaml b/build/int.cloudbuild.yaml index c8bab6c35..d5756f087 100644 --- a/build/int.cloudbuild.yaml +++ b/build/int.cloudbuild.yaml @@ -24,19 +24,295 @@ steps: - 'TF_VAR_org_id=$_ORG_ID' - 'TF_VAR_folder_id=$_FOLDER_ID' - 'TF_VAR_billing_account=$_BILLING_ACCOUNT' -- id: init node-pool-local +- id: init-all waitFor: - prepare name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' - args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage init --verbose'] -- id: converge node-pool-local + args: ['/bin/bash', '-c', 'cft test run all --stage init --verbose'] +- id: create-all waitFor: - - init node-pool-local + - init-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do create'] +- id: apply disable-client-cert + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage apply --verbose --test-dir test/integration'] +- id: verify disable-client-cert + waitFor: + - apply disable-client-cert + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage verify --verbose --test-dir test/integration'] +- id: teardown disable-client-cert + waitFor: + - verify disable-client-cert + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestDisableClientCert --stage teardown --verbose --test-dir test/integration'] +- id: apply shared-vpc-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage apply --verbose --test-dir test/integration'] +- id: verify shared-vpc-local + waitFor: + - apply shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage verify --verbose --test-dir test/integration'] +- id: destroy shared-vpc-local + waitFor: + - verify shared-vpc-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSharedVPC --stage teardown --verbose --test-dir test/integration'] +- id: apply safer-cluster-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage apply --verbose'] +- id: verify safer-cluster-local + waitFor: + - apply safer-cluster-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage verify --verbose'] +- id: destroy safer-cluster-local + waitFor: + - verify safer-cluster-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSaferCluster --stage destroy --verbose'] +- id: apply simple-regional-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage apply --verbose'] +- id: verify simple-regional-local + waitFor: + - apply simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage verify --verbose'] +- id: destroy simple-regional-local + waitFor: + - verify simple-regional-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegional --stage teardown --verbose'] +- id: apply simple-regional-private-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage apply --verbose'] +- id: verify simple-regional-private-local + waitFor: + - apply simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage verify --verbose'] +- id: destroy simple-regional-private-local + waitFor: + - verify simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalPrivate --stage teardown --verbose'] +- id: apply simple-regional-cluster-autoscaling + waitFor: + - create-all + - destroy simple-regional-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage apply --verbose'] +- id: verify simple-regional-cluster-autoscaling + waitFor: + - apply simple-regional-cluster-autoscaling + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage verify --verbose'] +- id: destroy simple-regional-cluster-autoscaling + waitFor: + - verify simple-regional-cluster-autoscaling + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalClusterAutoscaling --stage teardown --verbose'] +- id: apply simple-regional-with-kubeconfig-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage apply --verbose'] +- id: verify simple-regional-with-kubeconfig-local + waitFor: + - apply simple-regional-with-kubeconfig-local + name: 
'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage verify --verbose'] +- id: destroy simple-regional-with-kubeconfig-local + waitFor: + - verify simple-regional-with-kubeconfig-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithKubeConfig --stage teardown --verbose'] +- id: converge simple-regional-with-gateway-api-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge simple-regional-with-gateway-api-local'] +- id: verify simple-regional-with-gateway-api-local + waitFor: + - converge simple-regional-with-gateway-api-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify simple-regional-with-gateway-api-local'] +- id: destroy simple-regional-with-gateway-api-local + waitFor: + - verify simple-regional-with-gateway-api-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy simple-regional-with-gateway-api-local'] +- id: apply simple-regional-with-networking-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage apply --verbose'] +- id: verify simple-regional-with-networking-local + waitFor: + - apply simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage verify --verbose'] +- id: destroy simple-regional-with-networking-local + waitFor: + - verify simple-regional-with-networking-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleRegionalWithNetworking --stage teardown --verbose'] +- id: apply simple-zonal-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage apply --verbose'] +- id: verify simple-zonal-local + waitFor: + - apply simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage verify --verbose'] +- id: destroy simple-zonal-local + waitFor: + - verify simple-zonal-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonal --stage teardown --verbose'] +- id: apply simple-zonal-private-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage apply --verbose'] +- 
id: verify simple-zonal-private-local + waitFor: + - apply simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage verify --verbose'] +- id: destroy simple-zonal-private-local + waitFor: + - verify simple-zonal-private-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'cft test run TestSimpleZonalPrivate --stage teardown --verbose'] +- id: converge stub-domains-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-local'] +- id: verify stub-domains-local + waitFor: + - converge stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-local'] +- id: destroy stub-domains-local + waitFor: + - verify stub-domains-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-local'] +- id: converge upstream-nameservers-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge upstream-nameservers-local'] +- id: verify upstream-nameservers-local + waitFor: + - converge upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify upstream-nameservers-local'] +- id: destroy upstream-nameservers-local + waitFor: + - verify upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy upstream-nameservers-local'] +- id: converge stub-domains-upstream-nameservers-local + waitFor: + - create-all + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge stub-domains-upstream-nameservers-local'] +- id: verify stub-domains-upstream-nameservers-local + waitFor: + - converge stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify stub-domains-upstream-nameservers-local'] +- id: destroy stub-domains-upstream-nameservers-local + waitFor: + - verify stub-domains-upstream-nameservers-local + name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' + args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy stub-domains-upstream-nameservers-local'] +- id: converge workload-metadata-config-local + waitFor: + - create-all + 
name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do converge workload-metadata-config-local']
+- id: verify workload-metadata-config-local
+  waitFor:
+    - converge workload-metadata-config-local
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do verify workload-metadata-config-local']
+- id: destroy workload-metadata-config-local
+  waitFor:
+    - verify workload-metadata-config-local
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'source /usr/local/bin/task_helper_functions.sh && kitchen_do destroy workload-metadata-config-local']
+- id: apply beta-cluster
+  waitFor:
+    - create-all
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage apply --verbose --test-dir test/integration']
+- id: verify beta-cluster
+  waitFor:
+    - apply beta-cluster
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage verify --verbose --test-dir test/integration']
+- id: teardown beta-cluster
+  waitFor:
+    - verify beta-cluster
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestBetaCluster --stage teardown --verbose --test-dir test/integration']
+- id: apply simple-windows-node-pool-local
+  waitFor:
+    - create-all
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage apply --verbose --test-dir test/integration']
+- id: verify simple-windows-node-pool-local
+  waitFor:
+    - apply simple-windows-node-pool-local
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage verify --verbose --test-dir test/integration']
+- id: destroy simple-windows-node-pool-local
+  waitFor:
+    - verify simple-windows-node-pool-local
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestSimpleWindowsNodePool --stage teardown --verbose --test-dir test/integration']
+- id: apply deploy-service-local
+  waitFor:
+    - create-all
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage apply --verbose']
+- id: verify deploy-service-local
+  waitFor:
+    - apply deploy-service-local
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage verify --verbose']
+- id: destroy deploy-service-local
+  waitFor:
+    - verify deploy-service-local
+  name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS'
+  args: ['/bin/bash', '-c', 'cft test run TestDeployService --stage destroy --verbose']
+- id: apply node-pool-local
+  waitFor:
+    - 
create-all name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage apply --verbose'] - id: verify node-pool-local waitFor: - - converge node-pool-local + - apply node-pool-local name: 'gcr.io/cloud-foundation-cicd/$_DOCKER_IMAGE_DEVELOPER_TOOLS:$_DOCKER_TAG_VERSION_DEVELOPER_TOOLS' args: ['/bin/bash', '-c', 'cft test run TestNodePool --stage verify --verbose'] - id: destroy node-pool-local diff --git a/test/fixtures/node_pool/outputs.tf b/test/fixtures/node_pool/outputs.tf index 74103ff0b..c00d31b1a 100644 --- a/test/fixtures/node_pool/outputs.tf +++ b/test/fixtures/node_pool/outputs.tf @@ -87,3 +87,7 @@ output "registry_project_ids" { output "random_string" { value = random_string.suffix.result } + +output "compute_engine_service_account" { + value = var.compute_engine_service_accounts[0] +} diff --git a/test/integration/go.mod b/test/integration/go.mod index c42f2394a..cd75b51e9 100644 --- a/test/integration/go.mod +++ b/test/integration/go.mod @@ -9,6 +9,8 @@ require ( github.com/gruntwork-io/terratest v0.48.1 github.com/hashicorp/terraform-json v0.24.0 github.com/stretchr/testify v1.10.0 + github.com/tidwall/gjson v1.18.0 + golang.org/x/sync v0.10.0 ) require ( @@ -103,7 +105,6 @@ require ( github.com/pquerna/otp v1.4.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect @@ -116,7 +117,6 @@ require ( golang.org/x/mod v0.22.0 // indirect golang.org/x/net v0.31.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect diff --git a/test/integration/node_pool/node_pool_test.go b/test/integration/node_pool/node_pool_test.go index 92d6caaf5..1e4afa194 100644 --- a/test/integration/node_pool/node_pool_test.go +++ b/test/integration/node_pool/node_pool_test.go @@ -15,16 +15,12 @@ package node_pool import ( "fmt" - "slices" "testing" "time" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai" - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" - "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" - "github.com/gruntwork-io/terratest/modules/k8s" "github.com/stretchr/testify/assert" "github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils" ) @@ -44,128 +40,29 @@ func TestNodePool(t *testing.T) { clusterName := bpt.GetStringOutput("cluster_name") randomString := bpt.GetStringOutput("random_string") kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint") + nodeServiceAccount := bpt.GetStringOutput("compute_engine_service_account") // Retrieve Project CAI projectCAI := cai.GetProjectResources(t, projectId, cai.WithAssetTypes([]string{"container.googleapis.com/Cluster", "k8s.io/Node"})) - t.Log(projectCAI.Raw) - // Retrieve Cluster from CAI - clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) - - 
if !projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data").Exists() { - t.Fatalf("Cluster not found: %s", clusterResourceName) - } - cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data") - t.Log(cluster.Raw) + // Retrieve Cluster from CAI // Equivalent gcloud describe command (classic) // cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId) + clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName) + cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data") - // Cluster Assertions (classic) - assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running") - assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type") - assert.Equal("https://www.googleapis.com/auth/cloud-platform", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes.0").String(), "has the expected oauth scopes") - assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account") - assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile") - assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning") - assert.JSONEq(`[ - { - "maximum": "20", - "minimum": "5", - "resourceType": "cpu" - }, - { - "maximum": "30", - "minimum": "10", - "resourceType": "memory" - } - ]`, - cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits") - - // Cluster Assertions using golden image (TestNodePool.json) with sanitizer + // Setup golden image with sanitizers g := golden.NewOrUpdate(t, cluster.String(), + golden.WithSanitizer(golden.StringSanitizer(nodeServiceAccount, "NODE_SERVICE_ACCOUNT")), golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")), golden.WithSanitizer(golden.StringSanitizer(randomString, "RANDOM_STRING")), golden.WithSanitizer(golden.StringSanitizer(kubernetesEndpoint, "KUBERNETES_ENDPOINT")), ) - checkPaths := utils.GetTerminalJSONPaths(g.GetJSON()) - - exemptPaths := []string{"nodePools"} - checkPaths = slices.DeleteFunc(checkPaths, func(s string) bool { - return slices.Contains(exemptPaths, s) - }) - g.JSONPathEqs(assert, cluster, checkPaths) - - // NodePool Assertions - nodePools := []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"} - for _, nodePool := range nodePools { - g.JSONPathEqs(assert, cluster.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)), utils.GetTerminalJSONPaths(g.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)))) - } - - // nodePool-01 Assertions - assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists") - assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type") - assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image") - assert.True(cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.enabled").Bool(), "has autoscaling enabled") - assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") - 
assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoRepair").Bool(), "has autorepair enabled") - assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") - assert.Equal("kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.shutdown-script").String(), "pool-2 exists") - assert.Equal("false", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.disable-legacy-endpoints").String(), "pool-2 exists") - assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "pool-01-example": "true", "cluster_name": "%s", "node_pool": "pool-01"}`, clusterName), - cluster.Get("nodePools.#(name==\"pool-01\").config.labels").String(), "has the expected labels") - assert.ElementsMatch([]string{"all-node-example", "pool-01-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-01", clusterName)}, - cluster.Get("nodePools.#(name==\"pool-01\").config.tags").Value().([]interface{}), "has the expected network tags") - assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl") - assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl") - - // nodePool-02 Assertions - assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists") - assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type") - assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled") - assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.minNodeCount").Int(), "has the expected minimum node count") - assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.maxNodeCount").Int(), "has the expected maximum node count") - assert.Equal(int64(30), cluster.Get("nodePools.#(name==\"pool-02\").config.diskSizeGb").Int(), "has the expected disk size") - assert.Equal("pd-standard", cluster.Get("nodePools.#(name==\"pool-02\").config.diskType").String(), "has the expected disk type") - assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-02\").config.imageType").String(), "has the expected image") - assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-02"}`, clusterName), - cluster.Get("nodePools.#(name==\"pool-02\").config.labels").String(), "has the expected labels") - assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-02", clusterName)}, - cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags") - assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") - - // nodwPool-03 Assertions - assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists") - assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), 
"has nodes in correct locations") - assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type") - assert.False(cluster.Get("nodePools.#(name==\"pool-03\").autoscaling.enabled").Bool(), "has autoscaling enabled") - assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-03\").initialNodeCount").Int(), "has the expected inital node count") - assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoRepair").Bool(), "has autorepair enabled") - assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoUpgrade").Bool(), "has automatic upgrades enabled") - assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-03", "sandbox.gke.io/runtime": "gvisor"}`, clusterName), - cluster.Get("nodePools.#(name==\"pool-03\").config.labels").String(), "has the expected labels") - assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-03", clusterName)}, - cluster.Get("nodePools.#(name==\"pool-03\").config.tags").Value().([]interface{}), "has the expected network tags") - assert.Equal("172.16.0.0/18", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podIpv4CidrBlock").String(), "has the expected pod range") - assert.Equal("test", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podRange").String(), "has the expected pod range") - assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-03\").config.imageType").String(), "has the expected image") - assert.Equal("static", cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuManagerPolicy").String(), "has the expected cpuManagerPolicy kubelet config") - assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config") - assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls") - - // nodePool-04 Assertions - assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists") - assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled") - // nodePool-05 Assertions - assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists") - assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled") + // Cluster (and listed node pools) Assertions + testutils.TGKEAssertGolden(assert, g, &cluster, []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"}, []string{"monitoringConfig.componentConfig.enableComponents"}) // TODO: enableComponents is UL // K8s Assertions - gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId) - k8sOpts := k8s.KubectlOptions{} - clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json") - assert.NoError(err) - clusterNodes := utils.ParseKubectlJSONResult(t, clusterNodesOp) assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -178,7 +75,7 @@ func TestNodePool(t *testing.T) { "value": "true" } ]`, - clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-01\").spec.taints").String(), "has the expected taints") + 
projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-01\").resource.data.spec.taints").String(), "has the expected taints") assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -191,7 +88,7 @@ func TestNodePool(t *testing.T) { "value": "present" } ]`, - clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint") + projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-02\").resource.data.spec.taints").String(), "has the expected all-pools-example taint") assert.JSONEq(`[ { "effect": "PreferNoSchedule", @@ -204,7 +101,7 @@ func TestNodePool(t *testing.T) { "value": "gvisor" } ]`, - clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint") + projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-03\").resource.data.spec.taints").String(), "has the expected all-pools-example taint") }) bpt.Test() diff --git a/test/integration/node_pool/testdata/TestNodePool.json b/test/integration/node_pool/testdata/TestNodePool.json index 15a7a12de..413d0ce05 100644 --- a/test/integration/node_pool/testdata/TestNodePool.json +++ b/test/integration/node_pool/testdata/TestNodePool.json @@ -221,7 +221,7 @@ "https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -354,7 +354,7 @@ "oauthScopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -451,7 +451,7 @@ "oauthScopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -541,7 +541,7 @@ "sandboxConfig": { "type": "GVISOR" }, - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -632,7 +632,7 @@ "reservationAffinity": { "consumeReservationType": "NO_RESERVATION" }, - "serviceAccount": "default", + "serviceAccount": "NODE_SERVICE_ACCOUNT", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, @@ -724,7 +724,7 @@ "oauthScopes": [ "https://www.googleapis.com/auth/cloud-platform" ], - "serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com", + "serviceAccount": "default", "shieldedInstanceConfig": { "enableIntegrityMonitoring": true }, diff --git a/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go b/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go index aa912771d..d57745112 100644 --- a/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go +++ b/test/integration/safer_cluster_iap_bastion/safer_cluster_iap_bastion_test.go @@ -35,20 +35,20 @@ func TestSaferClusterIapBastion(t *testing.T) { // bpt.DefaultVerify(assert) testutils.TGKEVerify(t, bpt, assert) // Verify Resources - test_command, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ") + testCommand, _ := 
strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ") // pre run ssh command so that ssh-keygen can run - gcloud.RunCmd(t, test_command, + gcloud.RunCmd(t, testCommand, gcloud.WithCommonArgs([]string{}), ) - cluster_version := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version")) + clusterVersion := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version")) - op := gcloud.Run(t, test_command, + op := gcloud.Run(t, testCommand, gcloud.WithCommonArgs([]string{}), ) - assert.Equal(cluster_version, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE") + assert.Equal(clusterVersion, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE") }) bpt.Test() diff --git a/test/integration/testutils/utils.go b/test/integration/testutils/utils.go index 1554678b9..0711bca79 100644 --- a/test/integration/testutils/utils.go +++ b/test/integration/testutils/utils.go @@ -15,14 +15,19 @@ package testutils import ( + "fmt" "slices" "strings" "testing" "time" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden" "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft" + "github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils" tfjson "github.com/hashicorp/terraform-json" "github.com/stretchr/testify/assert" + "github.com/tidwall/gjson" + "golang.org/x/sync/errgroup" ) var ( @@ -36,6 +41,8 @@ var ( // API Rate limit exceeded errors can be retried. ".*rateLimitExceeded.*": "Rate limit exceeded.", } + + ClusterAlwaysExemptPaths = []string{"nodePools"} // node pools are separately checked by name ) func GetTestProjectFromSetup(t *testing.T, idx int) string { @@ -67,3 +74,42 @@ func TGKEVerifyExemptResources(t *testing.T, b *tft.TFBlueprintTest, assert *ass assert.Equal(tfjson.Actions{tfjson.ActionNoop}, r.Change.Actions, "Plan must be no-op for resource: %s", r.Address) } } + +// TGKEAssertGolden asserts a cluster and listed node pools against paths in golden image +func TGKEAssertGolden(assert *assert.Assertions, golden *golden.GoldenFile, clusterJson *gjson.Result, nodePools []string, exemptClusterPaths []string) { + // Retrieve golden paths + clusterCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON()) + + // Remove exempt cluster paths + exemptPaths := slices.Concat(exemptClusterPaths, ClusterAlwaysExemptPaths) + clusterCheckPaths = slices.DeleteFunc(clusterCheckPaths, func(s string) bool { + for _, exempPath := range exemptPaths { + if strings.HasPrefix(s, exempPath) { + return true + } + } + return false + }) + + // Cluster assertions + golden.JSONPathEqs(assert, *clusterJson, clusterCheckPaths) + + // NodePool assertions + for _, nodePool := range nodePools { + assert.Truef(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)).Exists(), "NodePool not found: %s", nodePool) + + nodeCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool))) + + syncGroup := new(errgroup.Group) + syncGroup.SetLimit(24) + for _, nodeCheckPath := range nodeCheckPaths { + nodeCheckPath := nodeCheckPath + syncGroup.Go(func() error { + gotData := golden.ApplySanitizers(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String()) + gfData := golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String() + assert.Equalf(gfData, gotData, "For node %s path %q expected %q to match fixture %q", nodePool, nodeCheckPath, gotData, gfData) + return nil 
+			})
+		}
+		// Block until every queued path comparison has finished; without this
+		// Wait the helper can return while assertion goroutines are still running.
+		assert.NoError(syncGroup.Wait())
+	}
+}
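
Note on the CAI-based lookups above: the rewritten test resolves the cluster and the k8s nodes from a single Cloud Asset Inventory export and picks items out of the JSON array with tidwall/gjson `#(...)` queries, instead of shelling out to `kubectl get nodes`. Below is a minimal, self-contained sketch of that query pattern; the JSON payload and resource names are invented for illustration and only mirror the shape the test consumes.

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	// Invented, trimmed-down CAI export: a JSON array of assets, each with a
	// name and a resource.data payload (the same shape the test queries).
	assets := `[
	  {"name": "//container.googleapis.com/projects/p/locations/l/clusters/c",
	   "resource": {"data": {"status": "RUNNING"}}},
	  {"name": "//k8s.io/core/v1/nodes/node-1",
	   "resource": {"data": {"metadata": {"labels": {"node_pool": "pool-01"}},
	                         "spec": {"taints": [{"key": "all-pools-example"}]}}}}
	]`

	// #(...) selects the first array element whose field matches the predicate.
	cluster := gjson.Get(assets, `#(name=="//container.googleapis.com/projects/p/locations/l/clusters/c").resource.data`)
	fmt.Println(cluster.Get("status").String()) // RUNNING

	// Predicates may use nested paths, which is how the taint assertions find
	// a node by its node_pool label.
	taints := gjson.Get(assets, `#(resource.data.metadata.labels.node_pool=="pool-01").resource.data.spec.taints`)
	fmt.Println(taints.String()) // [{"key": "all-pools-example"}]
}
```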
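The per-path golden comparisons in `TGKEAssertGolden` fan out on an errgroup with a concurrency cap. Here is a standalone sketch of that pattern with invented paths and documents; the real helper derives the path list from the golden file via `utils.GetTerminalJSONPaths` and compares sanitized values, so this only illustrates the concurrency shape.

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
	"golang.org/x/sync/errgroup"
)

func main() {
	got := `{"config": {"machineType": "e2-medium", "imageType": "COS_CONTAINERD"}}`
	want := `{"config": {"machineType": "e2-medium", "imageType": "COS_CONTAINERD"}}`

	// Invented terminal paths; the helper computes these from the fixture.
	paths := []string{"config.machineType", "config.imageType"}

	group := new(errgroup.Group)
	group.SetLimit(24) // same cap the helper sets
	for _, p := range paths {
		p := p // per-iteration copy, as in the helper (pre-Go 1.22 semantics)
		group.Go(func() error {
			if gjson.Get(got, p).String() != gjson.Get(want, p).String() {
				return fmt.Errorf("path %q differs", p)
			}
			return nil
		})
	}
	// Wait blocks until every queued comparison has run and surfaces the
	// first non-nil error.
	if err := group.Wait(); err != nil {
		fmt.Println("mismatch:", err)
		return
	}
	fmt.Println("all paths match")
}
```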