From 599b7a96abbba26f8d2b43a5f64c4f428f78193f Mon Sep 17 00:00:00 2001
From: Nathan McKinley
Date: Thu, 24 Oct 2019 20:37:11 +0000
Subject: [PATCH] Add the ability to use the GKE recurring maintenance window in beta.

Signed-off-by: Modular Magician
---
 google/resource_container_cluster.go       | 80 ++++++++++++-------
 google/resource_container_cluster_test.go  |  4 +-
 .../docs/r/container_cluster.html.markdown | 22 ++++-
 3 files changed, 75 insertions(+), 31 deletions(-)

diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go
index a3d881be891..a8976db5c4a 100644
--- a/google/resource_container_cluster.go
+++ b/google/resource_container_cluster.go
@@ -311,6 +311,7 @@ func resourceContainerCluster() *schema.Resource {
           "daily_maintenance_window": {
             Type:     schema.TypeList,
             Required: true,
+            MaxItems: 1,
             Elem: &schema.Resource{
               Schema: map[string]*schema.Schema{
                 "start_time": {
@@ -763,7 +764,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
   cluster := &containerBeta.Cluster{
     Name:                           clusterName,
     InitialNodeCount:               int64(d.Get("initial_node_count").(int)),
-    MaintenancePolicy:              expandMaintenancePolicy(d.Get("maintenance_policy")),
+    MaintenancePolicy:              expandMaintenancePolicy(d, meta),
     MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config")),
     InitialClusterVersion:          d.Get("min_master_version").(string),
     ClusterIpv4Cidr:                d.Get("cluster_ipv4_cidr").(string),
@@ -1117,15 +1118,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
   }
 
   if d.HasChange("maintenance_policy") {
-    var req *containerBeta.SetMaintenancePolicyRequest
-    if mp, ok := d.GetOk("maintenance_policy"); ok {
-      req = &containerBeta.SetMaintenancePolicyRequest{
-        MaintenancePolicy: expandMaintenancePolicy(mp),
-      }
-    } else {
-      req = &containerBeta.SetMaintenancePolicyRequest{
-        NullFields: []string{"MaintenancePolicy"},
-      }
+    req := &containerBeta.SetMaintenancePolicyRequest{
+      MaintenancePolicy: expandMaintenancePolicy(d, meta),
     }
 
     updateF := func() error {
@@ -1736,22 +1730,48 @@ func expandIPAllocationPolicy(configured interface{}) *containerBeta.IPAllocatio
   }
 }
 
-func expandMaintenancePolicy(configured interface{}) *containerBeta.MaintenancePolicy {
+func expandMaintenancePolicy(d *schema.ResourceData, meta interface{}) *containerBeta.MaintenancePolicy {
+  config := meta.(*Config)
+  // We have to perform a full Get() as part of this, to get the fingerprint. We can't do this
+  // at any other time, because the fingerprint update might happen between plan and apply.
+  // We can omit error checks, since to have gotten this far, a project is definitely configured.
+  project, _ := getProject(d, config)
+  location, _ := getLocation(d, config)
+  clusterName := d.Get("name").(string)
+  name := containerClusterFullName(project, location, clusterName)
+  cluster, _ := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+  resourceVersion := ""
+  // If the cluster doesn't exist or if there is a read error of any kind, we will pass in an empty
+  // resourceVersion. If there happens to be a change to maintenance policy, we will fail at that
+  // point. This is a compromise between code cleanliness and a slightly worse user experience in
+  // an unlikely error case - we choose code cleanliness.
+  if cluster != nil && cluster.MaintenancePolicy != nil {
+    resourceVersion = cluster.MaintenancePolicy.ResourceVersion
+  }
+
+  configured := d.Get("maintenance_policy")
   l := configured.([]interface{})
   if len(l) == 0 || l[0] == nil {
-    return nil
+    return &containerBeta.MaintenancePolicy{
+      ResourceVersion: resourceVersion,
+    }
   }
   maintenancePolicy := l[0].(map[string]interface{})
-  dailyMaintenanceWindow := maintenancePolicy["daily_maintenance_window"].([]interface{})[0].(map[string]interface{})
-  startTime := dailyMaintenanceWindow["start_time"].(string)
-  return &containerBeta.MaintenancePolicy{
-    Window: &containerBeta.MaintenanceWindow{
-      DailyMaintenanceWindow: &containerBeta.DailyMaintenanceWindow{
-        StartTime: startTime,
+
+  if dailyMaintenanceWindow, ok := maintenancePolicy["daily_maintenance_window"]; ok && len(dailyMaintenanceWindow.([]interface{})) > 0 {
+    dmw := dailyMaintenanceWindow.([]interface{})[0].(map[string]interface{})
+    startTime := dmw["start_time"].(string)
+    return &containerBeta.MaintenancePolicy{
+      Window: &containerBeta.MaintenanceWindow{
+        DailyMaintenanceWindow: &containerBeta.DailyMaintenanceWindow{
+          StartTime: startTime,
+        },
       },
-    },
+      ResourceVersion: resourceVersion,
+    }
   }
+
+  return nil
 }
 
 func expandMasterAuth(configured interface{}) *containerBeta.MasterAuth {
@@ -1969,19 +1989,23 @@ func flattenIPAllocationPolicy(c *containerBeta.Cluster, d *schema.ResourceData,
 }
 
 func flattenMaintenancePolicy(mp *containerBeta.MaintenancePolicy) []map[string]interface{} {
-  if mp == nil || mp.Window == nil || mp.Window.DailyMaintenanceWindow == nil {
+  if mp == nil || mp.Window == nil {
     return nil
   }
-  return []map[string]interface{}{
-    {
-      "daily_maintenance_window": []map[string]interface{}{
-        {
-          "start_time": mp.Window.DailyMaintenanceWindow.StartTime,
-          "duration":   mp.Window.DailyMaintenanceWindow.Duration,
+  if mp.Window.DailyMaintenanceWindow != nil {
+    return []map[string]interface{}{
+      {
+        "daily_maintenance_window": []map[string]interface{}{
+          {
+            "start_time": mp.Window.DailyMaintenanceWindow.StartTime,
+            "duration":   mp.Window.DailyMaintenanceWindow.Duration,
+          },
         },
       },
-    },
+    }
   }
+
+  return nil
 }
 
 func flattenMasterAuth(ma *containerBeta.MasterAuth) []map[string]interface{} {
diff --git a/google/resource_container_cluster_test.go b/google/resource_container_cluster_test.go
index 2b562877bbc..f89c2e62975 100644
--- a/google/resource_container_cluster_test.go
+++ b/google/resource_container_cluster_test.go
@@ -2355,8 +2355,8 @@ resource "google_container_cluster" "cidr_error_overlap" {
   initial_node_count = 1
 
   ip_allocation_policy {
-    cluster_ipv4_cidr_block  = "10.0.0.0/16"
-    services_ipv4_cidr_block = "10.1.0.0/16"
+    cluster_ipv4_cidr_block  = "10.0.0.0/16"
+    services_ipv4_cidr_block = "10.1.0.0/16"
   }
 }
 `, initConfig, secondCluster)
diff --git a/website/docs/r/container_cluster.html.markdown b/website/docs/r/container_cluster.html.markdown
index 6193ac57f9f..9427dedae7d 100644
--- a/website/docs/r/container_cluster.html.markdown
+++ b/website/docs/r/container_cluster.html.markdown
@@ -402,7 +402,7 @@ The `authenticator_groups_config` block supports:
 
 The `maintenance_policy` block supports:
 
-* `daily_maintenance_window` - (Required) Time window specified for daily maintenance operations.
+* `daily_maintenance_window` - (Required in GA, Optional in Beta) Time window specified for daily maintenance operations.
 Specify `start_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM",
 where HH : \[00-23\] and MM : \[00-59\] GMT.
 For example:
@@ -414,6 +414,26 @@ maintenance_policy {
   daily_maintenance_window {
     start_time = "03:00"
   }
 }
 ```
 
+* `recurring_window` - (Optional, [Beta](https://terraform.io/docs/providers/google/provider_versions.html)) Time window for
+recurring maintenance operations.
+
+Specify `start_time` and `end_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) date format. The start time's date is
+the initial date that the window starts on, and the end time is used only to calculate the window's duration. Specify
+`recurrence` in [RFC5545](https://tools.ietf.org/html/rfc5545#section-3.8.5.3) RRULE format to describe how often the window recurs.
+
+For example:
+```
+maintenance_policy {
+  recurring_window {
+    start_time = "2019-01-01T03:00"
+    end_time   = "2019-01-01T06:00"
+    recurrence = "FREQ=DAILY"
+  }
+}
+```
+
+In beta, exactly one of `daily_maintenance_window` or `recurring_window` is required if a `maintenance_policy` block is supplied.
+
 The `ip_allocation_policy` block supports:
 
 * `use_ip_aliases` - (Optional) Whether alias IPs will be used for pod IPs in
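Putting the documented pieces together, a minimal sketch of a cluster that opts into the new beta field might look like the following. The provider block, project, location, and cluster name are illustrative placeholders (not part of this patch), and the weekend-only `recurrence` rule is just one example of a valid RRULE; only the shape of the `recurring_window` block comes from the docs added above.

```
# Sketch only: project, location, and names are placeholders, not part of this patch.
# recurring_window is beta, so the google-beta provider is required.
provider "google-beta" {
  project = "my-project-id"
  region  = "us-central1"
}

resource "google_container_cluster" "example" {
  provider           = "google-beta"
  name               = "example-cluster"
  location           = "us-central1"
  initial_node_count = 1

  maintenance_policy {
    # Exactly one of daily_maintenance_window or recurring_window may be set.
    recurring_window {
      start_time = "2019-01-01T03:00"
      end_time   = "2019-01-01T06:00"
      recurrence = "FREQ=WEEKLY;BYDAY=SA,SU"
    }
  }
}
```

Switching back to `daily_maintenance_window`, or dropping the `maintenance_policy` block entirely, goes through the same `SetMaintenancePolicy` update path, which is why `expandMaintenancePolicy` in this patch always fetches the cluster's current `resourceVersion` before building the request.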