Don't recreate container_cluster when maintenance_window changes #893
Changes from 1 commit (of four in this pull request: 4bf9de1, ef5c04b, f4670d7, be6fc1f).
@@ -175,21 +175,21 @@ func resourceContainerCluster() *schema.Resource {
             "maintenance_policy": {
                 Type:     schema.TypeList,
                 Optional: true,
-                ForceNew: true,
+                ForceNew: false,
                 MaxItems: 1,
                 Elem: &schema.Resource{
                     Schema: map[string]*schema.Schema{
                         "daily_maintenance_window": {
                             Type:     schema.TypeList,
                             Required: true,
-                            ForceNew: true,
+                            ForceNew: false,
                             MaxItems: 1,
                             Elem: &schema.Resource{
                                 Schema: map[string]*schema.Schema{
                                     "start_time": {
                                         Type:             schema.TypeString,
                                         Required:         true,
-                                        ForceNew:         true,
+                                        ForceNew:         false,
                                         ValidateFunc:     validateRFC3339Time,
                                         DiffSuppressFunc: rfc3339TimeDiffSuppress,
                                     },
@@ -799,6 +799,45 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
         }
     }

+    if d.HasChange("maintenance_policy") {
+        var req *container.SetMaintenancePolicyRequest
+        if mp, ok := d.GetOk("maintenance_policy"); ok {
+            maintenancePolicy := mp.([]interface{})[0].(map[string]interface{})
Review comment: I won't block on this, but if you wanted you could add an […]
+
+            dailyMaintenanceWindow := maintenancePolicy["daily_maintenance_window"].([]interface{})[0].(map[string]interface{})
+            startTime := dailyMaintenanceWindow["start_time"].(string)
+
+            req = &container.SetMaintenancePolicyRequest{
+                MaintenancePolicy: &container.MaintenancePolicy{
+                    Window: &container.MaintenanceWindow{
+                        DailyMaintenanceWindow: &container.DailyMaintenanceWindow{
+                            StartTime: startTime,
+                        },
+                    },
+                },
+            }
+        } else {
+            req = &container.SetMaintenancePolicyRequest{
+                NullFields: []string{"MaintenancePolicy"},
+            }
+        }
+
+        op, err := config.clientContainer.Projects.Zones.Clusters.SetMaintenancePolicy(
+            project, zoneName, clusterName, req).Do()
+        if err != nil {
+            return err
+        }
+
+        // Wait until it's updated
+        waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster maintenance policy", timeoutInMinutes, 2)
+        if waitErr != nil {
+            return waitErr
+        }
+
+        log.Printf("[INFO] GKE cluster %s maintenance policy has been updated", d.Id())
+
+        d.SetPartial("maintenance_policy")
+    }
+
     if d.HasChange("additional_zones") {
         azSet := d.Get("additional_zones").(*schema.Set)
         if azSet.Contains(zoneName) {
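A note on how the new branch above behaves: d.GetOk("maintenance_policy") reports ok = false when the block has been removed from the configuration, which routes the code into the NullFields branch, and listing a field in NullFields makes the generated Google API client send it as an explicit JSON null so the server clears the existing policy rather than ignoring the empty value. Below is a minimal sketch of the same logic pulled into a standalone helper; the buildMaintenancePolicyRequest name is an assumption for illustration, not part of this PR.

    // Hypothetical refactor (not part of this PR): build the request that either
    // sets or clears the cluster's maintenance policy, mirroring the diff above.
    // Assumes the provider's existing imports: helper/schema and
    // google.golang.org/api/container/v1.
    func buildMaintenancePolicyRequest(d *schema.ResourceData) *container.SetMaintenancePolicyRequest {
        mp, ok := d.GetOk("maintenance_policy")
        if !ok {
            // Block removed from config: send an explicit JSON null so the
            // API clears the policy instead of leaving it untouched.
            return &container.SetMaintenancePolicyRequest{
                NullFields: []string{"MaintenancePolicy"},
            }
        }

        maintenancePolicy := mp.([]interface{})[0].(map[string]interface{})
        dailyMaintenanceWindow := maintenancePolicy["daily_maintenance_window"].([]interface{})[0].(map[string]interface{})

        return &container.SetMaintenancePolicyRequest{
            MaintenancePolicy: &container.MaintenancePolicy{
                Window: &container.MaintenanceWindow{
                    DailyMaintenanceWindow: &container.DailyMaintenanceWindow{
                        StartTime: dailyMaintenanceWindow["start_time"].(string),
                    },
                },
            },
        }
    }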
@@ -610,14 +610,22 @@ func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) {

 func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) {
     t.Parallel()
+    clusterName := acctest.RandString(10)

     resource.Test(t, resource.TestCase{
         PreCheck:     func() { testAccPreCheck(t) },
         Providers:    testAccProviders,
         CheckDestroy: testAccCheckContainerClusterDestroy,
         Steps: []resource.TestStep{
             {
-                Config: testAccContainerCluster_withMaintenanceWindow("03:00"),
+                Config: testAccContainerCluster_withMaintenanceWindow(clusterName, "03:00"),
                 Check: resource.ComposeTestCheckFunc(
                     testAccCheckContainerCluster(
                         "google_container_cluster.with_maintenance_window"),
                 ),
             },
+            {
+                Config: testAccContainerCluster_withMaintenanceWindow(clusterName, ""),
+                Check: resource.ComposeTestCheckFunc(
Review comment: I'm not sure if this extra step is worth it. I wanted to add something to prove that the cluster had not been torn down & recreated, but couldn't find a neat way to do this.

Review comment: I also tried to add code along these lines:

    if cluster.MaintenancePolicy == nil {
        clusterTests = append(clusterTests, clusterTestField{"maintenance_policy", []string{}})
    } else {
        clusterTests = append(clusterTests, clusterTestField{"maintenance_policy.0.daily_maintenance_window.0.start_time", cluster.MaintenancePolicy.Window.DailyMaintenanceWindow.StartTime})
        clusterTests = append(clusterTests, clusterTestField{"maintenance_policy.0.daily_maintenance_window.0.duration", cluster.MaintenancePolicy.Window.DailyMaintenanceWindow.Duration})
    }

but it turned out not really to prove anything: if I failed to correctly implement the "update MaintenanceWindow to nil" call, the state passed into the TestCheckFunc was the actual state (i.e. still with a non-nil MaintenanceWindow), so the test passed even though the apply hadn't done what I wanted.

Review comment: Yeah, our test infrastructure definitely leaves something to be desired. I do think having the test to check that you can remove a maintenance window is good to have. For the "check that it was a real update" problem, see https://github.com/hashicorp/terraform/issues/15126. And for proving the absence, you could use […]
+                    testAccCheckContainerCluster(
+                        "google_container_cluster.with_maintenance_window"),
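On the discussion above about proving the cluster was not torn down and recreated: one possible approach (an illustration only, not something in this PR; the helper name and wiring are assumptions) is to compare a creation-only property of the live cluster across the two steps, for example CreateTime from the GKE API.

    // Sketch of one possible check (not part of this PR) that the cluster
    // survived the in-place update: remember a creation-only property from the
    // live API in the first step and require it to be unchanged in later steps.
    // Assumes the provider test package's usual plumbing (testAccProvider,
    // Config) and imports of fmt, helper/resource, and terraform.
    func testAccCheckContainerClusterNotRecreated(n string, createTime *string) resource.TestCheckFunc {
        return func(s *terraform.State) error {
            rs, ok := s.RootModule().Resources[n]
            if !ok {
                return fmt.Errorf("resource not found: %s", n)
            }

            config := testAccProvider.Meta().(*Config)
            cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
                config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do()
            if err != nil {
                return err
            }

            if *createTime == "" {
                // First step: record when the cluster was created.
                *createTime = cluster.CreateTime
            } else if *createTime != cluster.CreateTime {
                // A different CreateTime means the cluster was destroyed and rebuilt.
                return fmt.Errorf("cluster was recreated: CreateTime changed from %q to %q",
                    *createTime, cluster.CreateTime)
            }
            return nil
        }
    }

A shared string could then be passed by pointer into both steps' Check lists. (Not necessarily what the reviewer had in mind above, but helper/resource also provides resource.TestCheckNoResourceAttr for asserting that an attribute is absent from state, with the caveat from the discussion that state-only checks have limits here.)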
@@ -1609,19 +1617,25 @@ resource "google_container_cluster" "with_node_pool_node_config" {
 `, testId, testId)
 }

-func testAccContainerCluster_withMaintenanceWindow(startTime string) string {
+func testAccContainerCluster_withMaintenanceWindow(clusterName string, startTime string) string {
+    maintenancePolicy := ""
+    if len(startTime) > 0 {
+        maintenancePolicy = fmt.Sprintf(`
+    maintenance_policy {
+        daily_maintenance_window {
+            start_time = "%s"
+        }
+    }`, startTime)
+    }
+
     return fmt.Sprintf(`
 resource "google_container_cluster" "with_maintenance_window" {
     name = "cluster-test-%s"
     zone = "us-central1-a"
     initial_node_count = 1

-    maintenance_policy {
-        daily_maintenance_window {
-            start_time = "%s"
-        }
-    }
-}`, acctest.RandString(10), startTime)
+%s
+}`, clusterName, maintenancePolicy)
 }

 func testAccContainerCluster_withIPAllocationPolicy(cluster string, ranges, policy map[string]string) string {
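The updated helper above only splices the maintenance_policy block into the HCL when a start time is given, so the second test step applies a config with no block at all and the provider has to clear the policy. A self-contained sketch of that pattern (names assumed for illustration, not the provider's test code) that prints both rendered variants:

    package main

    import "fmt"

    // clusterConfig mirrors the test helper's pattern: the maintenance_policy
    // block is only rendered when startTime is non-empty, so an empty string
    // yields a config without the block.
    func clusterConfig(clusterName, startTime string) string {
        maintenancePolicy := ""
        if len(startTime) > 0 {
            maintenancePolicy = fmt.Sprintf(`
      maintenance_policy {
        daily_maintenance_window {
          start_time = "%s"
        }
      }`, startTime)
        }

        return fmt.Sprintf(`
    resource "google_container_cluster" "with_maintenance_window" {
      name               = "cluster-test-%s"
      zone               = "us-central1-a"
      initial_node_count = 1
    %s
    }`, clusterName, maintenancePolicy)
    }

    func main() {
        fmt.Println(clusterConfig("abc123", "03:00")) // step 1: window at 03:00
        fmt.Println(clusterConfig("abc123", ""))      // step 2: window removed
    }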
Review comment: This is fine, but the pattern we tend to use is to just not set ForceNew at all if it's false.
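For reference, the pattern described here relies on ForceNew being a plain bool whose zero value is false, so omitting it has the same effect as ForceNew: false. Applied to the start_time entry from the first hunk, it would look like the following sketch (the reviewer's suggestion illustrated, not a change in this PR):

    "start_time": {
        Type:     schema.TypeString,
        Required: true,
        // ForceNew omitted: the zero value (false) already allows in-place
        // updates instead of forcing the cluster to be replaced.
        ValidateFunc:     validateRFC3339Time,
        DiffSuppressFunc: rfc3339TimeDiffSuppress,
    },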