From eb3874785ad6b6eef1b8d2bad5b3ddf0a00f18ae Mon Sep 17 00:00:00 2001
From: The Magician
Date: Tue, 24 Oct 2023 08:48:55 -0700
Subject: [PATCH] Maintenance interval for sole-tenant node groups. (#9307)
 (#6561)

[upstream:a67d6a338c538d6ee383540f734b748e72ce0c84]

Signed-off-by: Modular Magician
---
 .changelog/9307.txt                           |  3 ++
 .../compute/resource_compute_node_group.go    | 36 ++++++++++++++
 ...ource_compute_node_group_generated_test.go | 48 +++++++++++++++++++
 .../docs/r/compute_node_group.html.markdown   | 35 ++++++++++++++
 4 files changed, 122 insertions(+)
 create mode 100644 .changelog/9307.txt

diff --git a/.changelog/9307.txt b/.changelog/9307.txt
new file mode 100644
index 0000000000..bcc3533749
--- /dev/null
+++ b/.changelog/9307.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+compute: added `maintenance_interval` field to `google_compute_node_group` resource (beta)
+```
diff --git a/google-beta/services/compute/resource_compute_node_group.go b/google-beta/services/compute/resource_compute_node_group.go
index 3f30607fdb..7f0ac6c70c 100644
--- a/google-beta/services/compute/resource_compute_node_group.go
+++ b/google-beta/services/compute/resource_compute_node_group.go
@@ -112,6 +112,15 @@ than or equal to max-nodes. The default value is 0.`,
 				Optional:    true,
 				Description: `The initial number of nodes in the node group. One of 'initial_size' or 'autoscaling_policy' must be configured on resource creation.`,
 			},
+			"maintenance_interval": {
+				Type:         schema.TypeString,
+				Computed:     true,
+				Optional:     true,
+				ValidateFunc: verify.ValidateEnum([]string{"AS_NEEDED", "RECURRENT", ""}),
+				Description: `Specifies the frequency of planned maintenance events. Set to one of the following:
+  - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available.
+  - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. Possible values: ["AS_NEEDED", "RECURRENT"]`,
+			},
 			"maintenance_policy": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -255,6 +264,12 @@ func resourceComputeNodeGroupCreate(d *schema.ResourceData, meta interface{}) er
 	} else if v, ok := d.GetOkExists("share_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(shareSettingsProp)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) {
 		obj["shareSettings"] = shareSettingsProp
 	}
+	maintenanceIntervalProp, err := expandComputeNodeGroupMaintenanceInterval(d.Get("maintenance_interval"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("maintenance_interval"); !tpgresource.IsEmptyValue(reflect.ValueOf(maintenanceIntervalProp)) && (ok || !reflect.DeepEqual(v, maintenanceIntervalProp)) {
+		obj["maintenanceInterval"] = maintenanceIntervalProp
+	}
 	zoneProp, err := expandComputeNodeGroupZone(d.Get("zone"), d, config)
 	if err != nil {
 		return err
@@ -395,6 +410,9 @@ func resourceComputeNodeGroupRead(d *schema.ResourceData, meta interface{}) erro
 	if err := d.Set("share_settings", flattenComputeNodeGroupShareSettings(res["shareSettings"], d, config)); err != nil {
 		return fmt.Errorf("Error reading NodeGroup: %s", err)
 	}
+	if err := d.Set("maintenance_interval", flattenComputeNodeGroupMaintenanceInterval(res["maintenanceInterval"], d, config)); err != nil {
+		return fmt.Errorf("Error reading NodeGroup: %s", err)
+	}
 	if err := d.Set("zone", flattenComputeNodeGroupZone(res["zone"], d, config)); err != nil {
 		return fmt.Errorf("Error reading NodeGroup: %s", err)
 	}
@@ -457,6 +475,12 @@ func resourceComputeNodeGroupUpdate(d *schema.ResourceData, meta interface{}) er
 	} else if v, ok := d.GetOkExists("share_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, shareSettingsProp)) {
 		obj["shareSettings"] = shareSettingsProp
 	}
+	maintenanceIntervalProp, err := expandComputeNodeGroupMaintenanceInterval(d.Get("maintenance_interval"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("maintenance_interval"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, maintenanceIntervalProp)) {
+		obj["maintenanceInterval"] = maintenanceIntervalProp
+	}
 	zoneProp, err := expandComputeNodeGroupZone(d.Get("zone"), d, config)
 	if err != nil {
 		return err
@@ -496,6 +520,10 @@ func resourceComputeNodeGroupUpdate(d *schema.ResourceData, meta interface{}) er
 		updateMask = append(updateMask, "shareSettings")
 	}
 
+	if d.HasChange("maintenance_interval") {
+		updateMask = append(updateMask, "maintenanceInterval")
+	}
+
 	if d.HasChange("zone") {
 		updateMask = append(updateMask, "zone")
 	}
@@ -811,6 +839,10 @@ func flattenComputeNodeGroupShareSettingsProjectMapProjectId(v interface{}, d *s
 	return v
 }
 
+func flattenComputeNodeGroupMaintenanceInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
 func flattenComputeNodeGroupZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	if v == nil {
 		return v
@@ -965,6 +997,10 @@ func expandComputeNodeGroupShareSettingsProjectMapProjectId(v interface{}, d tpg
 	return v, nil
 }
 
+func expandComputeNodeGroupMaintenanceInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
 func expandComputeNodeGroupZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
 	f, err := tpgresource.ParseGlobalFieldValue("zones", v.(string), "project", d, config, true)
 	if err != nil {
diff --git a/google-beta/services/compute/resource_compute_node_group_generated_test.go b/google-beta/services/compute/resource_compute_node_group_generated_test.go
index c099915a5d..2882489ab5 100644
--- a/google-beta/services/compute/resource_compute_node_group_generated_test.go
+++ b/google-beta/services/compute/resource_compute_node_group_generated_test.go
@@ -75,6 +75,54 @@ resource "google_compute_node_group" "nodes" {
 `, context)
 }
 
+func TestAccComputeNodeGroup_nodeGroupMaintenanceIntervalExample(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t),
+		CheckDestroy:             testAccCheckComputeNodeGroupDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccComputeNodeGroup_nodeGroupMaintenanceIntervalExample(context),
+			},
+			{
+				ResourceName:            "google_compute_node_group.nodes",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"node_template", "initial_size", "zone"},
+			},
+		},
+	})
+}
+
+func testAccComputeNodeGroup_nodeGroupMaintenanceIntervalExample(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_compute_node_template" "soletenant-tmpl" {
+  provider  = google-beta
+  name      = "tf-test-soletenant-tmpl%{random_suffix}"
+  region    = "us-central1"
+  node_type = "c2-node-60-240"
+}
+
+resource "google_compute_node_group" "nodes" {
+  provider    = google-beta
+  name        = "tf-test-soletenant-group%{random_suffix}"
+  zone        = "us-central1-a"
+  description = "example google_compute_node_group for Terraform Google Provider"
+
+  initial_size  = 1
+  node_template = google_compute_node_template.soletenant-tmpl.id
+
+  maintenance_interval = "RECURRENT"
+}
+`, context)
+}
+
 func TestAccComputeNodeGroup_nodeGroupAutoscalingPolicyExample(t *testing.T) {
 	t.Parallel()
 
diff --git a/website/docs/r/compute_node_group.html.markdown b/website/docs/r/compute_node_group.html.markdown
index 2e5967c54c..adfec0b257 100644
--- a/website/docs/r/compute_node_group.html.markdown
+++ b/website/docs/r/compute_node_group.html.markdown
@@ -52,6 +52,34 @@ resource "google_compute_node_group" "nodes" {
   node_template = google_compute_node_template.soletenant-tmpl.id
 }
 ```
+
+## Example Usage - Node Group Maintenance Interval
+
+
+```hcl
+resource "google_compute_node_template" "soletenant-tmpl" {
+  provider  = google-beta
+  name      = "soletenant-tmpl"
+  region    = "us-central1"
+  node_type = "c2-node-60-240"
+}
+
+resource "google_compute_node_group" "nodes" {
+  provider    = google-beta
+  name        = "soletenant-group"
+  zone        = "us-central1-a"
+  description = "example google_compute_node_group for Terraform Google Provider"
+
+  initial_size  = 1
+  node_template = google_compute_node_template.soletenant-tmpl.id
+
+  maintenance_interval = "RECURRENT"
+}
+```
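
Since `resourceComputeNodeGroupUpdate` above appends `maintenanceInterval` to the PATCH request's update mask whenever the field changes, switching an existing group between the two values should be an in-place update rather than a destroy/recreate. A minimal sketch of such a change, assuming the node group from the documentation example above already exists:

```hcl
resource "google_compute_node_group" "nodes" {
  provider    = google-beta
  name        = "soletenant-group"
  zone        = "us-central1-a"
  description = "example google_compute_node_group for Terraform Google Provider"

  initial_size  = 1
  node_template = google_compute_node_template.soletenant-tmpl.id

  # Changed from "RECURRENT" in the example above. The provider sends the
  # new value with updateMask=maintenanceInterval, so `terraform apply`
  # patches the existing group in place.
  maintenance_interval = "AS_NEEDED"
}
```

Because the schema also marks the field `Computed`, configurations that omit `maintenance_interval` simply adopt the API-chosen value into state without producing a permanent plan diff.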