Skip to content

Commit

Permalink
final work
Browse files Browse the repository at this point in the history
  • Loading branch information
apeabody committed Dec 20, 2024
1 parent 8b7ddd4 commit 43734be
Show file tree
Hide file tree
Showing 7 changed files with 355 additions and 132 deletions.
286 changes: 281 additions & 5 deletions build/int.cloudbuild.yaml

Large diffs are not rendered by default.

4 changes: 4 additions & 0 deletions test/fixtures/node_pool/outputs.tf
Original file line number Diff line number Diff line change
Expand Up @@ -87,3 +87,7 @@ output "registry_project_ids" {
output "random_string" {
value = random_string.suffix.result
}

output "compute_engine_service_account" {
value = var.compute_engine_service_accounts[0]
}
4 changes: 2 additions & 2 deletions test/integration/go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ require (
github.com/gruntwork-io/terratest v0.48.1
github.com/hashicorp/terraform-json v0.24.0
github.com/stretchr/testify v1.10.0
github.com/tidwall/gjson v1.18.0
golang.org/x/sync v0.10.0
)

require (
Expand Down Expand Up @@ -103,7 +105,6 @@ require (
github.com/pquerna/otp v1.4.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
Expand All @@ -116,7 +117,6 @@ require (
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.31.0 // indirect
golang.org/x/oauth2 v0.24.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
Expand Down
125 changes: 11 additions & 114 deletions test/integration/node_pool/node_pool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,12 @@ package node_pool

import (
"fmt"
"slices"
"testing"
"time"

"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/cai"
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/gcloud"
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden"
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft"
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/stretchr/testify/assert"
"github.com/terraform-google-modules/terraform-google-kubernetes-engine/test/integration/testutils"
)
Expand All @@ -44,128 +40,29 @@ func TestNodePool(t *testing.T) {
clusterName := bpt.GetStringOutput("cluster_name")
randomString := bpt.GetStringOutput("random_string")
kubernetesEndpoint := bpt.GetStringOutput("kubernetes_endpoint")
nodeServiceAccount := bpt.GetStringOutput("compute_engine_service_account")

// Retrieve Project CAI
projectCAI := cai.GetProjectResources(t, projectId, cai.WithAssetTypes([]string{"container.googleapis.com/Cluster", "k8s.io/Node"}))
t.Log(projectCAI.Raw)
// Retrieve Cluster from CAI
clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName)

if !projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data").Exists() {
t.Fatalf("Cluster not found: %s", clusterResourceName)
}

cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data")
t.Log(cluster.Raw)
// Retrieve Cluster from CAI
// Equivalent gcloud describe command (classic)
// cluster := gcloud.Runf(t, "container clusters describe %s --zone %s --project %s", clusterName, location, projectId)
clusterResourceName := fmt.Sprintf("//container.googleapis.com/projects/%s/locations/%s/clusters/%s", projectId, location, clusterName)
cluster := projectCAI.Get("#(name=\"" + clusterResourceName + "\").resource.data")

// Cluster Assertions (classic)
assert.Contains([]string{"RUNNING", "RECONCILING"}, cluster.Get("status").String(), "Cluster is Running")
assert.Equal("COS_CONTAINERD", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.imageType").String(), "has the expected image type")
assert.Equal("https://www.googleapis.com/auth/cloud-platform", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.oauthScopes.0").String(), "has the expected oauth scopes")
assert.Equal("default", cluster.Get("autoscaling.autoprovisioningNodePoolDefaults.serviceAccount").String(), "has the expected service account")
assert.Equal("OPTIMIZE_UTILIZATION", cluster.Get("autoscaling.autoscalingProfile").String(), "has the expected autoscaling profile")
assert.True(cluster.Get("autoscaling.enableNodeAutoprovisioning").Bool(), "has the expected node autoprovisioning")
assert.JSONEq(`[
{
"maximum": "20",
"minimum": "5",
"resourceType": "cpu"
},
{
"maximum": "30",
"minimum": "10",
"resourceType": "memory"
}
]`,
cluster.Get("autoscaling.resourceLimits").String(), "has the expected resource limits")

// Cluster Assertions using golden image (TestNodePool.json) with sanitizer
// Setup golden image with sanitizers
g := golden.NewOrUpdate(t, cluster.String(),
golden.WithSanitizer(golden.StringSanitizer(nodeServiceAccount, "NODE_SERVICE_ACCOUNT")),
golden.WithSanitizer(golden.StringSanitizer(projectId, "PROJECT_ID")),
golden.WithSanitizer(golden.StringSanitizer(randomString, "RANDOM_STRING")),
golden.WithSanitizer(golden.StringSanitizer(kubernetesEndpoint, "KUBERNETES_ENDPOINT")),
)
checkPaths := utils.GetTerminalJSONPaths(g.GetJSON())

exemptPaths := []string{"nodePools"}
checkPaths = slices.DeleteFunc(checkPaths, func(s string) bool {
return slices.Contains(exemptPaths, s)
})
g.JSONPathEqs(assert, cluster, checkPaths)

// NodePool Assertions
nodePools := []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"}
for _, nodePool := range nodePools {
g.JSONPathEqs(assert, cluster.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)), utils.GetTerminalJSONPaths(g.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool))))
}

// nodePool-01 Assertions
assert.Equal("pool-01", cluster.Get("nodePools.#(name==\"pool-01\").name").String(), "pool-1 exists")
assert.Equal("e2-medium", cluster.Get("nodePools.#(name==\"pool-01\").config.machineType").String(), "is the expected machine type")
assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-01\").config.imageType").String(), "has the expected image")
assert.True(cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.enabled").Bool(), "has autoscaling enabled")
assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-01\").autoscaling.minNodeCount").Int(), "has the expected minimum node count")
assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoRepair").Bool(), "has autorepair enabled")
assert.True(cluster.Get("nodePools.#(name==\"pool-01\").management.autoUpgrade").Bool(), "has automatic upgrades enabled")
assert.Equal("kubectl --kubeconfig=/var/lib/kubelet/kubeconfig drain --force=true --ignore-daemonsets=true --delete-local-data \"$HOSTNAME\"", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.shutdown-script").String(), "pool-2 exists")
assert.Equal("false", cluster.Get("nodePools.#(name==\"pool-01\").config.metadata.disable-legacy-endpoints").String(), "pool-2 exists")
assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "pool-01-example": "true", "cluster_name": "%s", "node_pool": "pool-01"}`, clusterName),
cluster.Get("nodePools.#(name==\"pool-01\").config.labels").String(), "has the expected labels")
assert.ElementsMatch([]string{"all-node-example", "pool-01-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-01", clusterName)},
cluster.Get("nodePools.#(name==\"pool-01\").config.tags").Value().([]interface{}), "has the expected network tags")
assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config net.core.netdev_max_backlog sysctl")
assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-01\").config.linuxNodeConfig.sysctls.net\\.core\\.rmem_max").Int(), "has the expected linux node config net.core.rmem_max sysctl")

// nodePool-02 Assertions
assert.Equal("pool-02", cluster.Get("nodePools.#(name==\"pool-02\").name").String(), "pool-2 exists")
assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-02\").config.machineType").String(), "is the expected machine type")
assert.True(cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.enabled").Bool(), "has autoscaling enabled")
assert.Equal(int64(1), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.minNodeCount").Int(), "has the expected minimum node count")
assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-02\").autoscaling.maxNodeCount").Int(), "has the expected maximum node count")
assert.Equal(int64(30), cluster.Get("nodePools.#(name==\"pool-02\").config.diskSizeGb").Int(), "has the expected disk size")
assert.Equal("pd-standard", cluster.Get("nodePools.#(name==\"pool-02\").config.diskType").String(), "has the expected disk type")
assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-02\").config.imageType").String(), "has the expected image")
assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-02"}`, clusterName),
cluster.Get("nodePools.#(name==\"pool-02\").config.labels").String(), "has the expected labels")
assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-02", clusterName)},
cluster.Get("nodePools.#(name==\"pool-02\").config.tags").Value().([]interface{}), "has the expected network tags")
assert.Equal(int64(10000), cluster.Get("nodePools.#(name==\"pool-02\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls")

// nodwPool-03 Assertions
assert.Equal("pool-03", cluster.Get("nodePools.#(name==\"pool-03\").name").String(), "pool-3 exists")
assert.JSONEq(fmt.Sprintf(`["%s-b", "%s-c"]`, location, location), cluster.Get("nodePools.#(name==\"pool-03\").locations").String(), "has nodes in correct locations")
assert.Equal("n1-standard-2", cluster.Get("nodePools.#(name==\"pool-03\").config.machineType").String(), "is the expected machine type")
assert.False(cluster.Get("nodePools.#(name==\"pool-03\").autoscaling.enabled").Bool(), "has autoscaling enabled")
assert.Equal(int64(2), cluster.Get("nodePools.#(name==\"pool-03\").initialNodeCount").Int(), "has the expected inital node count")
assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoRepair").Bool(), "has autorepair enabled")
assert.True(cluster.Get("nodePools.#(name==\"pool-03\").management.autoUpgrade").Bool(), "has automatic upgrades enabled")
assert.JSONEq(fmt.Sprintf(`{"all-pools-example": "true", "cluster_name": "%s", "node_pool": "pool-03", "sandbox.gke.io/runtime": "gvisor"}`, clusterName),
cluster.Get("nodePools.#(name==\"pool-03\").config.labels").String(), "has the expected labels")
assert.ElementsMatch([]string{"all-node-example", fmt.Sprintf("gke-%s", clusterName), fmt.Sprintf("gke-%s-pool-03", clusterName)},
cluster.Get("nodePools.#(name==\"pool-03\").config.tags").Value().([]interface{}), "has the expected network tags")
assert.Equal("172.16.0.0/18", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podIpv4CidrBlock").String(), "has the expected pod range")
assert.Equal("test", cluster.Get("nodePools.#(name==\"pool-03\").networkConfig.podRange").String(), "has the expected pod range")
assert.Equal("COS_CONTAINERD", cluster.Get("nodePools.#(name==\"pool-03\").config.imageType").String(), "has the expected image")
assert.Equal("static", cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuManagerPolicy").String(), "has the expected cpuManagerPolicy kubelet config")
assert.True(cluster.Get("nodePools.#(name==\"pool-03\").config.kubeletConfig.cpuCfsQuota").Bool(), "has the expected cpuCfsQuota kubelet config")
assert.Equal(int64(20000), cluster.Get("nodePools.#(name==\"pool-03\").config.linuxNodeConfig.sysctls.net\\.core\\.netdev_max_backlog").Int(), "has the expected linux node config sysctls")

// nodePool-04 Assertions
assert.Equal("pool-04", cluster.Get("nodePools.#(name==\"pool-04\").name").String(), "pool-4 exists")
assert.False(cluster.Get("nodePools.#(name==\"pool-04\").config.queuedProvisioning.enabled").Bool(), "has queued provisioning not enabled")

// nodePool-05 Assertions
assert.Equal("pool-05", cluster.Get("nodePools.#(name==\"pool-05\").name").String(), "pool-5 exists")
assert.True(cluster.Get("nodePools.#(name==\"pool-05\").config.advancedMachineFeatures.enableNestedVirtualization").Bool(), "has enable_nested_virtualization enabled")
// Cluster (and listed node pools) Assertions
testutils.TGKEAssertGolden(assert, g, &cluster, []string{"pool-01", "pool-02", "pool-03", "pool-04", "pool-05"}, []string{"monitoringConfig.componentConfig.enableComponents"}) // TODO: enableComponents is UL

// K8s Assertions
gcloud.Runf(t, "container clusters get-credentials %s --region %s --project %s", clusterName, location, projectId)
k8sOpts := k8s.KubectlOptions{}
clusterNodesOp, err := k8s.RunKubectlAndGetOutputE(t, &k8sOpts, "get", "nodes", "-o", "json")
assert.NoError(err)
clusterNodes := utils.ParseKubectlJSONResult(t, clusterNodesOp)
assert.JSONEq(`[
{
"effect": "PreferNoSchedule",
Expand All @@ -178,7 +75,7 @@ func TestNodePool(t *testing.T) {
"value": "true"
}
]`,
clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-01\").spec.taints").String(), "has the expected taints")
projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-01\").resource.data.spec.taints").String(), "has the expected taints")
assert.JSONEq(`[
{
"effect": "PreferNoSchedule",
Expand All @@ -191,7 +88,7 @@ func TestNodePool(t *testing.T) {
"value": "present"
}
]`,
clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-02\").spec.taints").String(), "has the expected all-pools-example taint")
projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-02\").resource.data.spec.taints").String(), "has the expected all-pools-example taint")
assert.JSONEq(`[
{
"effect": "PreferNoSchedule",
Expand All @@ -204,7 +101,7 @@ func TestNodePool(t *testing.T) {
"value": "gvisor"
}
]`,
clusterNodes.Get("items.#(metadata.labels.node_pool==\"pool-03\").spec.taints").String(), "has the expected all-pools-example taint")
projectCAI.Get("#(resource.data.metadata.labels.node_pool==\"pool-03\").resource.data.spec.taints").String(), "has the expected all-pools-example taint")
})

bpt.Test()
Expand Down
12 changes: 6 additions & 6 deletions test/integration/node_pool/testdata/TestNodePool.json
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@
"https://www.googleapis.com/auth/userinfo.email",
"https://www.googleapis.com/auth/cloud-platform"
],
"serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
"serviceAccount": "NODE_SERVICE_ACCOUNT",
"shieldedInstanceConfig": {
"enableIntegrityMonitoring": true
},
Expand Down Expand Up @@ -354,7 +354,7 @@
"oauthScopes": [
"https://www.googleapis.com/auth/cloud-platform"
],
"serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
"serviceAccount": "NODE_SERVICE_ACCOUNT",
"shieldedInstanceConfig": {
"enableIntegrityMonitoring": true
},
Expand Down Expand Up @@ -451,7 +451,7 @@
"oauthScopes": [
"https://www.googleapis.com/auth/cloud-platform"
],
"serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
"serviceAccount": "NODE_SERVICE_ACCOUNT",
"shieldedInstanceConfig": {
"enableIntegrityMonitoring": true
},
Expand Down Expand Up @@ -541,7 +541,7 @@
"sandboxConfig": {
"type": "GVISOR"
},
"serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
"serviceAccount": "NODE_SERVICE_ACCOUNT",
"shieldedInstanceConfig": {
"enableIntegrityMonitoring": true
},
Expand Down Expand Up @@ -632,7 +632,7 @@
"reservationAffinity": {
"consumeReservationType": "NO_RESERVATION"
},
"serviceAccount": "default",
"serviceAccount": "NODE_SERVICE_ACCOUNT",
"shieldedInstanceConfig": {
"enableIntegrityMonitoring": true
},
Expand Down Expand Up @@ -724,7 +724,7 @@
"oauthScopes": [
"https://www.googleapis.com/auth/cloud-platform"
],
"serviceAccount": "gke-sa-int-test-p1-8220@PROJECT_ID.iam.gserviceaccount.com",
"serviceAccount": "default",
"shieldedInstanceConfig": {
"enableIntegrityMonitoring": true
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,20 +35,20 @@ func TestSaferClusterIapBastion(t *testing.T) {
// bpt.DefaultVerify(assert)
testutils.TGKEVerify(t, bpt, assert) // Verify Resources

test_command, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ")
testCommand, _ := strings.CutPrefix(bpt.GetStringOutput("test_command"), "gcloud ")

// pre run ssh command so that ssh-keygen can run
gcloud.RunCmd(t, test_command,
gcloud.RunCmd(t, testCommand,
gcloud.WithCommonArgs([]string{}),
)

cluster_version := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version"))
clusterVersion := fmt.Sprintf("v%s", bpt.GetStringOutput("cluster_version"))

op := gcloud.Run(t, test_command,
op := gcloud.Run(t, testCommand,
gcloud.WithCommonArgs([]string{}),
)

assert.Equal(cluster_version, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE")
assert.Equal(clusterVersion, op.Get("gitVersion").String(), "SSH into VM and verify connectivity to GKE")
})

bpt.Test()
Expand Down
46 changes: 46 additions & 0 deletions test/integration/testutils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,19 @@
package testutils

import (
"fmt"
"slices"
"strings"
"testing"
"time"

"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/golden"
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/tft"
"github.com/GoogleCloudPlatform/cloud-foundation-toolkit/infra/blueprint-test/pkg/utils"
tfjson "github.com/hashicorp/terraform-json"
"github.com/stretchr/testify/assert"
"github.com/tidwall/gjson"
"golang.org/x/sync/errgroup"
)

var (
Expand All @@ -36,6 +41,8 @@ var (
// API Rate limit exceeded errors can be retried.
".*rateLimitExceeded.*": "Rate limit exceeded.",
}

ClusterAlwaysExemptPaths = []string{"nodePools"} // node pools are separately checked by name
)

func GetTestProjectFromSetup(t *testing.T, idx int) string {
Expand Down Expand Up @@ -67,3 +74,42 @@ func TGKEVerifyExemptResources(t *testing.T, b *tft.TFBlueprintTest, assert *ass
assert.Equal(tfjson.Actions{tfjson.ActionNoop}, r.Change.Actions, "Plan must be no-op for resource: %s", r.Address)
}
}

// TGKEAssertGolden asserts a cluster and the listed node pools against the
// terminal JSON paths recorded in the golden image.
//
// Cluster-level paths are checked first (minus exemptClusterPaths and the
// always-exempt paths such as "nodePools", which are validated per pool
// below). Each named node pool is then located in clusterJson and every
// terminal path under its golden entry is compared, with sanitizers applied
// to the live data so it matches the sanitized fixture.
func TGKEAssertGolden(assert *assert.Assertions, golden *golden.GoldenFile, clusterJson *gjson.Result, nodePools []string, exemptClusterPaths []string) {
	// Retrieve all terminal JSON paths present in the golden fixture.
	clusterCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON())

	// Remove exempt cluster paths (prefix match, so "nodePools" also drops
	// "nodePools.0.name" etc.).
	exemptPaths := slices.Concat(exemptClusterPaths, ClusterAlwaysExemptPaths)
	clusterCheckPaths = slices.DeleteFunc(clusterCheckPaths, func(s string) bool {
		for _, exemptPath := range exemptPaths {
			if strings.HasPrefix(s, exemptPath) {
				return true
			}
		}
		return false
	})

	// Cluster assertions against the remaining golden paths.
	golden.JSONPathEqs(assert, *clusterJson, clusterCheckPaths)

	// NodePool assertions: each listed pool must exist and match the fixture.
	for _, nodePool := range nodePools {
		assert.Truef(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s).name", nodePool)).Exists(), "NodePool not found: %s", nodePool)

		nodeCheckPaths := utils.GetTerminalJSONPaths(golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)))

		// Compare paths concurrently with bounded parallelism.
		syncGroup := new(errgroup.Group)
		syncGroup.SetLimit(24)
		for _, nodeCheckPath := range nodeCheckPaths {
			nodeCheckPath := nodeCheckPath // capture loop variable (pre-Go 1.22 semantics)
			syncGroup.Go(func() error {
				gotData := golden.ApplySanitizers(clusterJson.Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String())
				gfData := golden.GetJSON().Get(fmt.Sprintf("nodePools.#(name==%s)", nodePool)).Get(nodeCheckPath).String()
				assert.Equalf(gfData, gotData, "For node %s path %q expected %q to match fixture %q", nodePool, nodeCheckPath, gotData, gfData)
				return nil
			})
		}
		// BUG FIX: the original never called Wait(), so the function could
		// return while comparison goroutines were still running — racing with
		// test completion. The worker funcs always return nil, so Wait() is
		// used purely as a synchronization barrier here.
		_ = syncGroup.Wait()
	}
}

0 comments on commit 43734be

Please sign in to comment.