
Merge the schemas and logic for the node pool resource and the node pool field in the cluster to aid in maintainability (hashicorp#489)

danawillow authored Oct 4, 2017
1 parent 7bfcabe commit bb0ab8e
Showing 5 changed files with 452 additions and 264 deletions.
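Of the five changed files, only the cluster resource and its test are rendered below; the shared node pool definitions themselves presumably live in google/resource_container_node_pool.go, which is not part of this excerpt. As a rough, minimal sketch of the pattern the cluster file switches to (field list abbreviated, exact contents assumed):

package google

import "github.com/hashicorp/terraform/helper/schema"

// Declared once, next to the google_container_node_pool resource, covering the
// fields both usages share (name, name_prefix, node_count, initial_node_count,
// node_config, and now autoscaling per the new tests).
var schemaNodePool = map[string]*schema.Schema{
	"name": {
		Type:     schema.TypeString,
		Optional: true,
		Computed: true,
		ForceNew: true,
	},
	// ... remaining node pool fields ...
}

The cluster schema then embeds the same map as the Elem of its node_pool list (first hunk of the cluster file below), while the standalone resource builds on it directly, so a field added in one place is automatically available in both.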
121 changes: 8 additions & 113 deletions google/resource_container_cluster.go
@@ -8,7 +8,6 @@ import (
"strings"
"time"

"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"google.golang.org/api/container/v1"
@@ -257,37 +256,7 @@ func resourceContainerCluster() *schema.Resource {
Computed: true,
ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"initial_node_count": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Computed: true,
Deprecated: "Use node_count instead",
},

"node_count": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.IntAtLeast(1),
},

"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},

"name_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},

"node_config": schemaNodeConfig,
},
Schema: schemaNodePool,
},
},

@@ -411,36 +380,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
if nodePoolsCount > 0 {
nodePools := make([]*container.NodePool, 0, nodePoolsCount)
for i := 0; i < nodePoolsCount; i++ {
prefix := fmt.Sprintf("node_pool.%d", i)

nodeCount := 0
if initialNodeCount, ok := d.GetOk(prefix + ".initial_node_count"); ok {
nodeCount = initialNodeCount.(int)
}
if nc, ok := d.GetOk(prefix + ".node_count"); ok {
if nodeCount != 0 {
return fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %d", i)
}
nodeCount = nc.(int)
}
if nodeCount == 0 {
return fmt.Errorf("Node pool %d cannot be set with 0 node count", i)
}

name, err := generateNodePoolName(prefix, d)
prefix := fmt.Sprintf("node_pool.%d.", i)
nodePool, err := expandNodePool(d, prefix)
if err != nil {
return err
}

nodePool := &container.NodePool{
Name: name,
InitialNodeCount: int64(nodeCount),
}

if v, ok := d.GetOk(prefix + ".node_config"); ok {
nodePool.Config = expandNodeConfig(v)
}

nodePools = append(nodePools, nodePool)
}
cluster.NodePools = nodePools
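expandNodePool is the shared helper that replaces the inline block deleted above. Its implementation is not shown in this excerpt; a minimal sketch, assuming it keeps the same validation and uses the provider's existing expandNodeConfig helper plus the fmt, helper/resource, and container/v1 imports:

func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, error) {
	// name and name_prefix are mutually exclusive; fall back to a generated ID.
	var name string
	if v, ok := d.GetOk(prefix + "name"); ok {
		if _, ok := d.GetOk(prefix + "name_prefix"); ok {
			return nil, fmt.Errorf("Cannot specify both name and name_prefix for a node_pool")
		}
		name = v.(string)
	} else if v, ok := d.GetOk(prefix + "name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	// initial_node_count is deprecated in favor of node_count; setting both is an
	// error, and the pool must end up with a non-zero size.
	nodeCount := 0
	if v, ok := d.GetOk(prefix + "initial_node_count"); ok {
		nodeCount = v.(int)
	}
	if v, ok := d.GetOk(prefix + "node_count"); ok {
		if nodeCount != 0 {
			return nil, fmt.Errorf("Cannot set both initial_node_count and node_count on a node pool")
		}
		nodeCount = v.(int)
	}
	if nodeCount == 0 {
		return nil, fmt.Errorf("Node pool cannot be set with 0 node count")
	}

	np := &container.NodePool{
		Name:             name,
		InitialNodeCount: int64(nodeCount),
	}
	if v, ok := d.GetOk(prefix + "node_config"); ok {
		np.Config = expandNodeConfig(v)
	}
	return np, nil
}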
@@ -654,24 +598,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er

if n, ok := d.GetOk("node_pool.#"); ok {
for i := 0; i < n.(int); i++ {
if d.HasChange(fmt.Sprintf("node_pool.%d.node_count", i)) {
newSize := int64(d.Get(fmt.Sprintf("node_pool.%d.node_count", i)).(int))
req := &container.SetNodePoolSizeRequest{
NodeCount: newSize,
}
npName := d.Get(fmt.Sprintf("node_pool.%d.name", i)).(string)
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zoneName, clusterName, npName, req).Do()
if err != nil {
return err
}

// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE node pool size", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}

log.Printf("[INFO] GKE node pool %s size has been updated to %d", npName, newSize)
if err := nodePoolUpdate(d, meta, clusterName, fmt.Sprintf("node_pool.%d.", i), timeoutInMinutes); err != nil {
return err
}
}
d.SetPartial("node_pool")
@@ -765,49 +693,16 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
nodePools := make([]map[string]interface{}, 0, len(c))

for i, np := range c {
// Node pools don't expose the current node count in their API, so read the
// instance groups instead. They should all have the same size, but in case a resize
// failed or something else strange happened, we'll just use the average size.
size := 0
for _, url := range np.InstanceGroupUrls {
// retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers)
matches := instanceGroupManagerURL.FindStringSubmatch(url)
igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
if err != nil {
return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
}
size += int(igm.TargetSize)
}
nodePool := map[string]interface{}{
"name": np.Name,
"name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
"initial_node_count": np.InitialNodeCount,
"node_count": size / len(np.InstanceGroupUrls),
"node_config": flattenNodeConfig(np.Config),
nodePool, err := flattenNodePool(d, config, np, fmt.Sprintf("node_pool.%d.", i))
if err != nil {
return nil, err
}
nodePools = append(nodePools, nodePool)
}

return nodePools, nil
}

func generateNodePoolName(prefix string, d *schema.ResourceData) (string, error) {
name, okName := d.GetOk(prefix + ".name")
namePrefix, okPrefix := d.GetOk(prefix + ".name_prefix")

if okName && okPrefix {
return "", fmt.Errorf("Cannot specify both name and name_prefix for a node_pool")
}

if okName {
return name.(string), nil
} else if okPrefix {
return resource.PrefixedUniqueId(namePrefix.(string)), nil
} else {
return resource.UniqueId(), nil
}
}

func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
parts := strings.Split(d.Id(), "/")
if len(parts) != 2 {
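The flattenNodePool call above likewise stands in for the per-pool flattening that was inlined here, including the instance-group-size averaging. A minimal sketch of what it plausibly carries over (actual implementation not shown in this excerpt):

func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodePool, prefix string) (map[string]interface{}, error) {
	// The node pool API does not expose a current node count, so read the target
	// sizes of the pool's instance group managers and average them.
	size := 0
	for _, url := range np.InstanceGroupUrls {
		matches := instanceGroupManagerURL.FindStringSubmatch(url)
		igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
		if err != nil {
			return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
		}
		size += int(igm.TargetSize)
	}

	return map[string]interface{}{
		"name":               np.Name,
		"name_prefix":        d.Get(prefix + "name_prefix"),
		"initial_node_count": np.InitialNodeCount,
		"node_count":         size / len(np.InstanceGroupUrls),
		"node_config":        flattenNodeConfig(np.Config),
	}, nil
}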
111 changes: 106 additions & 5 deletions google/resource_container_cluster_test.go
@@ -281,13 +281,16 @@ func TestAccContainerCluster_withLogging(t *testing.T) {
}

func TestAccContainerCluster_withNodePoolBasic(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
npName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withNodePoolBasic,
Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_node_pool"),
@@ -325,6 +328,43 @@ func TestAccContainerCluster_withNodePoolResize(t *testing.T) {
})
}

func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
npName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccContainerCluster_withNodePoolAutoscaling(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster("google_container_cluster.with_node_pool"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "3"),
),
},
resource.TestStep{
Config: testAccContainerCluster_withNodePoolUpdateAutoscaling(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster("google_container_cluster.with_node_pool"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "5"),
),
},
resource.TestStep{
Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster("google_container_cluster.with_node_pool"),
resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"),
resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count"),
),
},
},
})
}

func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -511,6 +551,21 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc {
clusterTestField{prefix + "node_config.0.tags", np.Config.Tags})

}
tfAS := attributes[prefix+"autoscaling.#"] == "1"
if gcpAS := np.Autoscaling != nil && np.Autoscaling.Enabled == true; tfAS != gcpAS {
return fmt.Errorf("Mismatched autoscaling status. TF State: %t. GCP State: %t", tfAS, gcpAS)
}
if tfAS {
if tf := attributes[prefix+"autoscaling.0.min_node_count"]; strconv.FormatInt(np.Autoscaling.MinNodeCount, 10) != tf {
return fmt.Errorf("Mismatched Autoscaling.MinNodeCount. TF State: %s. GCP State: %d",
tf, np.Autoscaling.MinNodeCount)
}

if tf := attributes[prefix+"autoscaling.0.max_node_count"]; strconv.FormatInt(np.Autoscaling.MaxNodeCount, 10) != tf {
return fmt.Errorf("Mismatched Autoscaling.MaxNodeCount. TF State: %s. GCP State: %d",
tf, np.Autoscaling.MaxNodeCount)
}
}
}

for _, attrs := range clusterTests {
@@ -950,9 +1005,10 @@ resource "google_container_cluster" "with_logging" {
}`, clusterName)
}

var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(`
func testAccContainerCluster_withNodePoolBasic(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "tf-cluster-nodepool-test-%s"
name = "%s"
zone = "us-central1-a"
master_auth {
@@ -961,10 +1017,11 @@ resource "google_container_cluster" "with_node_pool" {
}
node_pool {
name = "tf-cluster-nodepool-test-%s"
name = "%s"
initial_node_count = 2
}
}`, acctest.RandString(10), acctest.RandString(10))
}`, cluster, nodePool)
}

func testAccContainerCluster_withNodePoolAdditionalZones(cluster, nodePool string) string {
return fmt.Sprintf(`
@@ -1002,6 +1059,50 @@ resource "google_container_cluster" "with_node_pool" {
}`, cluster, nodePool)
}

func testAccContainerCluster_withNodePoolAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
zone = "us-central1-a"
master_auth {
username = "mr.yoda"
password = "adoy.rm"
}
node_pool {
name = "%s"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 3
}
}
}`, cluster, np)
}

func testAccContainerCluster_withNodePoolUpdateAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
zone = "us-central1-a"
master_auth {
username = "mr.yoda"
password = "adoy.rm"
}
node_pool {
name = "%s"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 5
}
}
}`, cluster, np)
}

var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool_name_prefix" {
name = "tf-cluster-nodepool-test-%s"