Add support for node_locations to google_container_node_pool. (#2320)
Merged PR #2320.
rileykarson authored and modular-magician committed Sep 17, 2019
1 parent c031961 commit 030eaf2
Showing 7 changed files with 166 additions and 18 deletions.
2 changes: 1 addition & 1 deletion build/terraform
2 changes: 1 addition & 1 deletion build/terraform-beta
@@ -108,6 +108,15 @@ var schemaNodePool = map[string]*schema.Schema{
Computed: true,
},

<% unless version == 'ga' -%>
"node_locations": {
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
<% end -%>

"initial_node_count": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
@@ -479,10 +488,21 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*containerBeta.NodeP
nodeCount = nc.(int)
}


<% unless version == 'ga' -%>
var locations []string
if v, ok := d.GetOk("node_locations"); ok && v.(*schema.Set).Len() > 0 {
locations = convertStringSet(v.(*schema.Set))
}
<% end -%>

np := &containerBeta.NodePool{
Name: name,
InitialNodeCount: int64(nodeCount),
Config: expandNodeConfig(d.Get(prefix + "node_config")),
<% unless version == 'ga' -%>
Locations: locations,
<% end -%>
Version: d.Get(prefix + "version").(string),
}

@@ -541,6 +561,9 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *containerBeta.N
"name": np.Name,
"name_prefix": d.Get(prefix + "name_prefix"),
"initial_node_count": np.InitialNodeCount,
<% unless version == 'ga' -%>
"node_locations": schema.NewSet(schema.HashString, convertStringArrToInterface(np.Locations)),
<% end -%>
"node_count": size / len(np.InstanceGroupUrls),
"node_config": flattenNodeConfig(np.Config),
"instance_group_urls": np.InstanceGroupUrls,
@@ -765,6 +788,35 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
}
}

<% unless version == 'ga' -%>
if d.HasChange(prefix + "node_locations") {
req := &containerBeta.UpdateNodePoolRequest{
Locations: convertStringSet(d.Get(prefix + "node_locations").(*schema.Set)),
}
updateF := func() error {
op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req).Do()

if err != nil {
return err
}

// Wait until it's updated
return containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", timeoutInMinutes)
}

// Call update serially.
if err := lockedCall(lockKey, updateF); err != nil {
return err
}

log.Printf("[INFO] Updated node locations in Node Pool %s", name)

if prefix == "" {
d.SetPartial("node_locations")
}
}
<% end -%>

return nil
}
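
Taken together, the schema, expand, flatten, and update changes above let the zone list be managed directly on the pool. A rough usage sketch (illustrative only, not part of this diff; it assumes a `google_container_cluster.cluster` resource defined elsewhere):

resource "google_container_node_pool" "np" {
  name               = "example-pool"
  location           = "us-central1"
  cluster            = "${google_container_cluster.cluster.name}"
  initial_node_count = 1

  # Editing this list on an existing pool goes through the new
  # node_locations branch in nodePoolUpdate (an in-place update)
  # rather than forcing the pool to be recreated.
  node_locations = ["us-central1-a", "us-central1-b"]
}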

@@ -34,6 +34,32 @@ func TestAccContainerNodePool_basic(t *testing.T) {
})
}

<% unless version == 'ga' -%>
func TestAccContainerNodePool_nodeLocations(t *testing.T) {
t.Parallel()

cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccContainerNodePool_nodeLocations(cluster, np),
},
resource.TestStep{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
<% end -%>

<% unless version == 'ga' -%>
func TestAccContainerNodePool_maxPodsPerNode(t *testing.T) {
t.Parallel()
@@ -698,6 +724,64 @@ resource "google_container_node_pool" "np" {
}`, cluster, np)
}

<% unless version == 'ga' -%>
func testAccContainerNodePool_nodeLocations(cluster, np string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
name = "container-net-%s"
auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "container_subnetwork" {
name = "${google_compute_network.container_network.name}"
network = "${google_compute_network.container_network.name}"
ip_cidr_range = "10.0.36.0/24"
region = "us-central1"
private_ip_google_access = true

secondary_ip_range {
range_name = "pod"
ip_cidr_range = "10.0.0.0/19"
}

secondary_ip_range {
range_name = "svc"
ip_cidr_range = "10.0.32.0/22"
}
}

resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1"
initial_node_count = 1

network = "${google_compute_network.container_network.name}"
subnetwork = "${google_compute_subnetwork.container_subnetwork.name}"
ip_allocation_policy {
cluster_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.0.range_name}"
services_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.1.range_name}"
}

private_cluster_config {
enable_private_endpoint = true
enable_private_nodes = true
master_ipv4_cidr_block = "10.42.0.0/28"
}

master_authorized_networks_config {}
}

resource "google_container_node_pool" "np" {
name = "%s"
location = "us-central1"
cluster = "${google_container_cluster.cluster.name}"

initial_node_count = 1
node_locations = ["us-central1-a", "us-central1-c"]
}`, cluster, cluster, np)
}
<% end -%>

<% unless version == 'ga' -%>
func testAccContainerNodePool_maxPodsPerNode(cluster, np string) string {
return fmt.Sprintf(`
Expand Down
7 changes: 5 additions & 2 deletions third_party/terraform/utils/node_config.go.erb
@@ -314,7 +314,8 @@ func expandNodeConfig(v interface{}) *containerBeta.NodeConfig {
if v, ok := nodeConfig["min_cpu_platform"]; ok {
nc.MinCpuPlatform = v.(string)
}
-<% unless version.nil? || version == 'ga' -%>
+
+<% unless version == 'ga' -%>
if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 {
taints := v.([]interface{})
nodeTaints := make([]*containerBeta.NodeTaint, 0, len(taints))
@@ -329,7 +330,9 @@
}
nc.Taints = nodeTaints
}
<% end -%>

<% unless version == 'ga' -%>
if v, ok := nodeConfig["workload_metadata_config"]; ok && len(v.([]interface{})) > 0 {
conf := v.([]interface{})[0].(map[string]interface{})
nc.WorkloadMetadataConfig = &containerBeta.WorkloadMetadataConfig{
@@ -368,7 +371,7 @@ func flattenNodeConfig(c *containerBeta.NodeConfig) []map[string]interface{} {
"tags": c.Tags,
"preemptible": c.Preemptible,
"min_cpu_platform": c.MinCpuPlatform,
-<% unless version.nil? || version == 'ga' -%>
+<% unless version == 'ga' -%>
"taint": flattenTaints(c.Taints),
"workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig),
"sandbox_config": flattenSandboxConfig(c.SandboxConfig),
@@ -129,11 +129,9 @@ throughout the region. `region` has been deprecated in favour of `location`.
the provider zone is used to create a zonal cluster.

* `node_locations` - (Optional) The list of zones in which the cluster's nodes
-should be located. These must be in the same region as the cluster zone for
-zonal clusters, or in the region of a regional cluster. In a multi-zonal cluster,
-the number of nodes specified in `initial_node_count` is created in
-all specified zones as well as the primary zone. If specified for a regional
-cluster, nodes will be created in only these zones.
+are located. Nodes must be in the region of their regional cluster or in the
+same region as their cluster's zone for zonal clusters. If this is specified for
+a zonal cluster, omit the cluster's zone.

-> A "multi-zonal" cluster is a zonal cluster with at least one additional zone
defined; in a multi-zonal cluster, the cluster master is only present in a
@@ -194,10 +192,11 @@ for more details. Structure is documented below.
Defaults to `false`

* `initial_node_count` - (Optional) The number of nodes to create in this
-cluster's default node pool. Must be set if `node_pool` is not set. If
-you're using `google_container_node_pool` objects with no default node pool,
-you'll need to set this to a value of at least `1`, alongside setting
-`remove_default_node_pool` to `true`.
+cluster's default node pool. In regional or multi-zonal clusters, this is the
+number of nodes per zone. Must be set if `node_pool` is not set. If you're using
+`google_container_node_pool` objects with no default node pool, you'll need to
+set this to a value of at least `1`, alongside setting
+`remove_default_node_pool` to `true`.
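
A quick worked example of the per-zone behavior described above (illustrative values, not from this changeset): a regional cluster whose default pool spans three zones starts with 3 × 2 = 6 nodes.

resource "google_container_cluster" "regional" {
  name           = "per-zone-example"
  location       = "us-central1"
  node_locations = ["us-central1-a", "us-central1-b", "us-central1-c"]

  # 2 nodes are created in each of the three zones above: 6 nodes in total.
  initial_node_count = 2
}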

* `ip_allocation_policy` - (Optional) Configuration for cluster IP allocation. As of now, only pre-allocated subnetworks (custom type with secondary ranges) are supported.
This will activate IP aliases. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases)
@@ -98,12 +98,11 @@ resource "google_container_cluster" "primary" {

## Argument Reference

* `cluster` - (Required) The cluster to create the node pool for. Cluster must be present in `zone` provided for zonal clusters.

- - -

-* `location` - (Optional) The location (region or zone) in which the cluster
-resides.
+* `location` - (Optional) The location (region or zone) of the cluster.

* `zone` - (Optional, Deprecated) The zone in which the cluster resides. `zone`
has been deprecated in favor of `location`.
@@ -119,8 +118,9 @@ type-specific `region` for regional clusters / `zone` for zonal clusters.
* `autoscaling` - (Optional) Configuration required by cluster autoscaler to adjust
the size of the node pool to the current cluster usage. Structure is documented below.

-* `initial_node_count` - (Optional) The initial node count for the pool. Changing this will force
-recreation of the resource.
+* `initial_node_count` - (Optional) The initial number of nodes for the pool. In
+regional or multi-zonal clusters, this is the number of nodes per zone. Changing
+this will force recreation of the resource.

* `management` - (Optional) Node management configuration, wherein auto-repair and
auto-upgrade is configured. Structure is documented below.
@@ -131,6 +131,16 @@ type-specific `region` for regional clusters / `zone` for zonal clusters.
See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
for more information.

* `node_locations` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html))
The list of zones in which the node pool's nodes should be located. Nodes must
be in the region of their regional cluster or in the same region as their
cluster's zone for zonal clusters. If unspecified, the cluster-level
`node_locations` will be used.

-> Note: `node_locations` will not revert to the cluster's default set of zones
upon being unset. You must manually reconcile the list of zones with your
cluster.
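
For example (an illustrative sketch, not part of this changeset, using the beta provider), a pool can narrow the zones it inherits from its cluster, and because of the note above, going back to the cluster's zones later means listing them explicitly rather than just deleting the attribute:

resource "google_container_cluster" "example" {
  name               = "example-cluster"
  location           = "us-central1"
  initial_node_count = 1
  node_locations     = ["us-central1-a", "us-central1-b"]
}

resource "google_container_node_pool" "example" {
  name       = "example-pool"
  location   = "us-central1"
  cluster    = "${google_container_cluster.example.name}"
  node_count = 1

  # Overrides the cluster-level zones. To return to the cluster's
  # ["us-central1-a", "us-central1-b"], set them here explicitly;
  # removing this argument leaves the pool on ["us-central1-c"].
  node_locations = ["us-central1-c"]
}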

* `name` - (Optional) The name of the node pool. If left blank, Terraform will
auto-generate a unique name.

