diff --git a/build/terraform b/build/terraform
index 146690a144d9..ecda9f292a53 160000
--- a/build/terraform
+++ b/build/terraform
@@ -1 +1 @@
-Subproject commit 146690a144d9d773b521ab41911d2ecd1aa25028
+Subproject commit ecda9f292a53e4dc7d1e8c1502f3eb30b122b157
diff --git a/build/terraform-beta b/build/terraform-beta
index 19dff8aef8fe..a1d05c614bcc 160000
--- a/build/terraform-beta
+++ b/build/terraform-beta
@@ -1 +1 @@
-Subproject commit 19dff8aef8fe6087d5d2b2202504b24cafac2802
+Subproject commit a1d05c614bcc45e86765a71dcc523b1ddae75a9a
diff --git a/third_party/terraform/resources/resource_container_node_pool.go.erb b/third_party/terraform/resources/resource_container_node_pool.go.erb
index a80e82b4b022..c873a478ac24 100644
--- a/third_party/terraform/resources/resource_container_node_pool.go.erb
+++ b/third_party/terraform/resources/resource_container_node_pool.go.erb
@@ -108,6 +108,15 @@ var schemaNodePool = map[string]*schema.Schema{
 		Computed: true,
 	},
 
+<% unless version == 'ga' -%>
+	"node_locations": {
+		Type:     schema.TypeSet,
+		Optional: true,
+		Computed: true,
+		Elem:     &schema.Schema{Type: schema.TypeString},
+	},
+<% end -%>
+
 	"initial_node_count": &schema.Schema{
 		Type:     schema.TypeInt,
 		Optional: true,
@@ -479,10 +488,21 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*containerBeta.NodeP
 		nodeCount = nc.(int)
 	}
 
+
+<% unless version == 'ga' -%>
+	var locations []string
+	if v, ok := d.GetOk("node_locations"); ok && v.(*schema.Set).Len() > 0 {
+		locations = convertStringSet(v.(*schema.Set))
+	}
+<% end -%>
+
 	np := &containerBeta.NodePool{
 		Name:             name,
 		InitialNodeCount: int64(nodeCount),
 		Config:           expandNodeConfig(d.Get(prefix + "node_config")),
+<% unless version == 'ga' -%>
+		Locations:        locations,
+<% end -%>
 		Version:          d.Get(prefix + "version").(string),
 	}
 
@@ -541,6 +561,9 @@ func flattenNodePool(d *schema.ResourceData, config *Config, np *containerBeta.N
 		"name":                np.Name,
 		"name_prefix":         d.Get(prefix + "name_prefix"),
 		"initial_node_count":  np.InitialNodeCount,
+<% unless version == 'ga' -%>
+		"node_locations":      schema.NewSet(schema.HashString, convertStringArrToInterface(np.Locations)),
+<% end -%>
 		"node_count":          size / len(np.InstanceGroupUrls),
 		"node_config":         flattenNodeConfig(np.Config),
 		"instance_group_urls": np.InstanceGroupUrls,
@@ -765,6 +788,35 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node
 		}
 	}
 
+<% unless version == 'ga' -%>
+	if d.HasChange(prefix + "node_locations") {
+		req := &containerBeta.UpdateNodePoolRequest{
+			Locations: convertStringSet(d.Get(prefix + "node_locations").(*schema.Set)),
+		}
+		updateF := func() error {
+			op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req).Do()
+
+			if err != nil {
+				return err
+			}
+
+			// Wait until it's updated
+			return containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", timeoutInMinutes)
+		}
+
+		// Call update serially.
+		if err := lockedCall(lockKey, updateF); err != nil {
+			return err
+		}
+
+		log.Printf("[INFO] Updated node locations in Node Pool %s", name)
+
+		if prefix == "" {
+			d.SetPartial("node_locations")
+		}
+	}
+<% end -%>
+
 	return nil
 }
 
diff --git a/third_party/terraform/tests/resource_container_node_pool_test.go.erb b/third_party/terraform/tests/resource_container_node_pool_test.go.erb
index 58b2d1fa5cfe..ee68c0e1c582 100644
--- a/third_party/terraform/tests/resource_container_node_pool_test.go.erb
+++ b/third_party/terraform/tests/resource_container_node_pool_test.go.erb
@@ -34,6 +34,32 @@ func TestAccContainerNodePool_basic(t *testing.T) {
 	})
 }
 
+<% unless version == 'ga' -%>
+func TestAccContainerNodePool_nodeLocations(t *testing.T) {
+	t.Parallel()
+
+	cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
+	np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckContainerNodePoolDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccContainerNodePool_nodeLocations(cluster, np),
+			},
+			resource.TestStep{
+				ResourceName:            "google_container_node_pool.np",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"max_pods_per_node"},
+			},
+		},
+	})
+}
+<% end -%>
+
 <% unless version == 'ga' -%>
 func TestAccContainerNodePool_maxPodsPerNode(t *testing.T) {
 	t.Parallel()
@@ -698,6 +724,64 @@ resource "google_container_node_pool" "np" {
 }`, cluster, np)
 }
 
+<% unless version == 'ga' -%>
+func testAccContainerNodePool_nodeLocations(cluster, np string) string {
+	return fmt.Sprintf(`
+resource "google_compute_network" "container_network" {
+  name                    = "container-net-%s"
+  auto_create_subnetworks = false
+}
+
+resource "google_compute_subnetwork" "container_subnetwork" {
+  name                     = "${google_compute_network.container_network.name}"
+  network                  = "${google_compute_network.container_network.name}"
+  ip_cidr_range            = "10.0.36.0/24"
+  region                   = "us-central1"
+  private_ip_google_access = true
+
+  secondary_ip_range {
+    range_name    = "pod"
+    ip_cidr_range = "10.0.0.0/19"
+  }
+
+  secondary_ip_range {
+    range_name    = "svc"
+    ip_cidr_range = "10.0.32.0/22"
+  }
+}
+
+resource "google_container_cluster" "cluster" {
+  name               = "%s"
+  location           = "us-central1"
+  initial_node_count = 1
+
+  network    = "${google_compute_network.container_network.name}"
+  subnetwork = "${google_compute_subnetwork.container_subnetwork.name}"
+  ip_allocation_policy {
+    cluster_secondary_range_name  = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.0.range_name}"
+    services_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.1.range_name}"
+  }
+
+  private_cluster_config {
+    enable_private_endpoint = true
+    enable_private_nodes    = true
+    master_ipv4_cidr_block  = "10.42.0.0/28"
+  }
+
+  master_authorized_networks_config {}
+}
+
+resource "google_container_node_pool" "np" {
+  name     = "%s"
+  location = "us-central1"
+  cluster  = "${google_container_cluster.cluster.name}"
+
+  initial_node_count = 1
+  node_locations     = ["us-central1-a", "us-central1-c"]
+}`, cluster, cluster, np)
+}
+<% end -%>
+
 <% unless version == 'ga' -%>
 func testAccContainerNodePool_maxPodsPerNode(cluster, np string) string {
 	return fmt.Sprintf(`
diff --git a/third_party/terraform/utils/node_config.go.erb b/third_party/terraform/utils/node_config.go.erb
index 78cfb8b67b5e..6be3362a36d7
--- a/third_party/terraform/utils/node_config.go.erb
+++ b/third_party/terraform/utils/node_config.go.erb
@@ -314,7 +314,8 @@ func expandNodeConfig(v interface{}) *containerBeta.NodeConfig {
 	if v, ok := nodeConfig["min_cpu_platform"]; ok {
 		nc.MinCpuPlatform = v.(string)
 	}
-<% unless version.nil? || version == 'ga' -%>
+
+<% unless version == 'ga' -%>
 	if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 {
 		taints := v.([]interface{})
 		nodeTaints := make([]*containerBeta.NodeTaint, 0, len(taints))
@@ -329,7 +330,9 @@ func expandNodeConfig(v interface{}) *containerBeta.NodeConfig {
 		}
 		nc.Taints = nodeTaints
 	}
+<% end -%>
 
+<% unless version == 'ga' -%>
 	if v, ok := nodeConfig["workload_metadata_config"]; ok && len(v.([]interface{})) > 0 {
 		conf := v.([]interface{})[0].(map[string]interface{})
 		nc.WorkloadMetadataConfig = &containerBeta.WorkloadMetadataConfig{
@@ -368,7 +371,7 @@ func flattenNodeConfig(c *containerBeta.NodeConfig) []map[string]interface{} {
 		"tags":             c.Tags,
 		"preemptible":      c.Preemptible,
 		"min_cpu_platform": c.MinCpuPlatform,
-<% unless version.nil? || version == 'ga' -%>
+<% unless version == 'ga' -%>
 		"taint":                    flattenTaints(c.Taints),
 		"workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig),
 		"sandbox_config":           flattenSandboxConfig(c.SandboxConfig),
diff --git a/third_party/terraform/website/docs/r/container_cluster.html.markdown b/third_party/terraform/website/docs/r/container_cluster.html.markdown
index d8062ab49598..57c49b585a42 100644
--- a/third_party/terraform/website/docs/r/container_cluster.html.markdown
+++ b/third_party/terraform/website/docs/r/container_cluster.html.markdown
@@ -129,11 +129,9 @@ throughout the region. `region` has been deprecated in favour of `location`.
 the provider zone is used to create a zonal cluster.
 
 * `node_locations` - (Optional) The list of zones in which the cluster's nodes
-should be located. These must be in the same region as the cluster zone for
-zonal clusters, or in the region of a regional cluster. In a multi-zonal cluster,
-the number of nodes specified in `initial_node_count` is created in
-all specified zones as well as the primary zone. If specified for a regional
-cluster, nodes will be created in only these zones.
+are located. Nodes must be in the region of their regional cluster or in the
+same region as their cluster's zone for zonal clusters. If this is specified for
+a zonal cluster, omit the cluster's zone.
 
 -> A "multi-zonal" cluster is a zonal cluster with at least one additional zone
 defined; in a multi-zonal cluster, the cluster master is only present in a
@@ -194,10 +192,11 @@ for more details. Structure is documented below.
 Defaults to `false`
 
 * `initial_node_count` - (Optional) The number of nodes to create in this
-  cluster's default node pool. Must be set if `node_pool` is not set. If
-  you're using `google_container_node_pool` objects with no default node pool,
-  you'll need to set this to a value of at least `1`, alongside setting
-  `remove_default_node_pool` to `true`.
+cluster's default node pool. In regional or multi-zonal clusters, this is the
+number of nodes per zone. Must be set if `node_pool` is not set. If you're using
+`google_container_node_pool` objects with no default node pool, you'll need to
+set this to a value of at least `1`, alongside setting
+`remove_default_node_pool` to `true`.
 
 * `ip_allocation_policy` - (Optional) Configuration for cluster IP allocation. As of now, only pre-allocated subnetworks (custom type with secondary ranges) are supported.
 This will activate IP aliases. See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/ip-aliases)
diff --git a/third_party/terraform/website/docs/r/container_node_pool.html.markdown b/third_party/terraform/website/docs/r/container_node_pool.html.markdown
index 3e94371333b7..eec49756905e 100644
--- a/third_party/terraform/website/docs/r/container_node_pool.html.markdown
+++ b/third_party/terraform/website/docs/r/container_node_pool.html.markdown
@@ -98,12 +98,11 @@ resource "google_container_cluster" "primary" {
 
 ## Argument Reference
 
-* `cluster` - (Required) The cluster to create the node pool for.  Cluster must be present in `zone` provided for zonal clusters.
+* `cluster` - (Required) The cluster to create the node pool for. Cluster must be present in `zone` provided for zonal clusters.
 
 - - -
 
-* `location` - (Optional) The location (region or zone) in which the cluster
-resides.
+* `location` - (Optional) The location (region or zone) of the cluster.
 
 * `zone` - (Optional, Deprecated) The zone in which the cluster resides. `zone`
 has been deprecated in favor of `location`.
@@ -119,8 +118,9 @@ type-specific `region` for regional clusters / `zone` for zonal clusters.
 
 * `autoscaling` - (Optional) Configuration required by cluster autoscaler to
 adjust the size of the node pool to the current cluster usage. Structure is documented below.
 
-* `initial_node_count` - (Optional) The initial node count for the pool. Changing this will force
-  recreation of the resource.
+* `initial_node_count` - (Optional) The initial number of nodes for the pool. In
+regional or multi-zonal clusters, this is the number of nodes per zone. Changing
+this will force recreation of the resource.
 
 * `management` - (Optional) Node management configuration, wherein auto-repair
 and auto-upgrade is configured. Structure is documented below.
@@ -131,6 +131,16 @@ type-specific `region` for regional clusters / `zone` for zonal clusters.
 See the [official documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr)
 for more information.
 
+* `node_locations` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html))
+The list of zones in which the node pool's nodes should be located. Nodes must
+be in the region of their regional cluster or in the same region as their
+cluster's zone for zonal clusters. If unspecified, the cluster-level
+`node_locations` will be used.
+
+-> Note: `node_locations` will not revert to the cluster's default set of zones
+upon being unset. You must manually reconcile the list of zones with your
+cluster.
+
 * `name` - (Optional) The name of the node pool. If left blank, Terraform will
 auto-generate a unique name.
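A minimal usage sketch of the new beta-only `node_locations` argument on `google_container_node_pool`, condensed from the acceptance-test fixture above; the pool name is illustrative and the referenced regional cluster in `us-central1` is assumed to already be defined elsewhere in the configuration:

    resource "google_container_node_pool" "np" {
      name     = "example-pool"                               # illustrative name
      location = "us-central1"
      cluster  = "${google_container_cluster.cluster.name}"   # assumes a regional us-central1 cluster

      # Per the updated docs, this is the per-zone node count in regional
      # or multi-zonal clusters.
      initial_node_count = 1

      # Beta: pin this pool's nodes to specific zones in the cluster's region.
      # If unset, the cluster-level node_locations apply; note that unsetting it
      # later will not revert the pool to the cluster's default zones.
      node_locations = ["us-central1-a", "us-central1-c"]
    }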