Revert "Added upgrade_settings attribute to the google_container_clus… (
Browse files Browse the repository at this point in the history
#6809) (#4865)

* Revert "Added upgrade_settings attribute to the google_container_cluster resource and promoted min_cpu_platform to GA (#6771)"

This reverts commit f8db021005ef5a9fd5d0ededd959773e58240402.

* add management back in

* move management to the right spot

Signed-off-by: Modular Magician <[email protected]>

modular-magician authored Nov 10, 2022
1 parent 2152417 commit c384381
Showing 4 changed files with 42 additions and 331 deletions.
3 changes: 3 additions & 0 deletions .changelog/6809.txt
@@ -0,0 +1,3 @@
```release-note:none

```
217 changes: 7 additions & 210 deletions google-beta/resource_container_cluster.go
@@ -6,7 +6,6 @@ import (
"log"
"reflect"
"regexp"
"strconv"
"strings"
"time"

@@ -551,91 +550,6 @@ func resourceContainerCluster() *schema.Resource {
},
},
},
"upgrade_settings": {
Type: schema.TypeList,
Optional: true,
Description: `Specifies the upgrade settings for NAP created node pools`,
Computed: true,
DiffSuppressFunc: UpgradeSettingsDiffSuppress,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"max_surge": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: `The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process.`,
},
"max_unavailable": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Description: `The maximum number of nodes that can be simultaneously unavailable during the upgrade process.`,
},
"strategy": {
Type: schema.TypeString,
Optional: true,
Computed: true,
Description: `Update strategy of the node pool.`,
ValidateFunc: validation.StringInSlice([]string{"NODE_POOL_UPDATE_STRATEGY_UNSPECIFIED", "BLUE_GREEN", "SURGE"}, false),
},
"blue_green_settings": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Description: `Settings for blue-green upgrade strategy.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"node_pool_soak_duration": {
Type: schema.TypeString,
Optional: true,
Description: `Time needed after draining entire blue pool. After this period, blue pool will be cleaned up.
A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`,
},
"standard_rollout_policy": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Description: `Standard policy for the blue-green upgrade.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"batch_percentage": {
Type: schema.TypeFloat,
Optional: true,
ValidateFunc: validation.FloatBetween(0.0, 1.0),
ExactlyOneOf: []string{
"cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_percentage",
"cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_node_count",
},
Description: `Percentage of the blue pool nodes to drain in a batch. The range of this field should be (0.0, 1.0].`,
},
"batch_node_count": {
Type: schema.TypeInt,
Optional: true,
ExactlyOneOf: []string{
"cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_percentage",
"cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_node_count",
},
Description: `Number of blue nodes to drain in a batch.`,
},
"batch_soak_duration": {
Type: schema.TypeString,
Optional: true,
Default: "0s",
Description: `Soak time after each batch gets drained.
A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`,
},
},
},
},
},
},
},
},
},
},
"management": {
Type: schema.TypeList,
Optional: true,
@@ -3838,14 +3752,13 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa
config := l[0].(map[string]interface{})

npd := &container.AutoprovisioningNodePoolDefaults{
OauthScopes: convertStringArr(config["oauth_scopes"].([]interface{})),
ServiceAccount: config["service_account"].(string),
DiskSizeGb: int64(config["disk_size"].(int)),
DiskType: config["disk_type"].(string),
ImageType: config["image_type"].(string),
BootDiskKmsKey: config["boot_disk_kms_key"].(string),
UpgradeSettings: expandUpgradeSettings(config["upgrade_settings"]),
Management: expandManagement(config["management"]),
OauthScopes: convertStringArr(config["oauth_scopes"].([]interface{})),
ServiceAccount: config["service_account"].(string),
DiskSizeGb: int64(config["disk_size"].(int)),
DiskType: config["disk_type"].(string),
ImageType: config["image_type"].(string),
BootDiskKmsKey: config["boot_disk_kms_key"].(string),
Management: expandManagement(config["management"]),
}

if v, ok := config["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
@@ -3862,7 +3775,6 @@ func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceDa
cpu = "automatic"
}
npd.MinCpuPlatform = cpu

return npd
}

@@ -3897,54 +3809,6 @@ func expandUpgradeOptions(configured interface{}) *container.AutoUpgradeOptions
return upgradeOptions
}

func expandUpgradeSettings(configured interface{}) *container.UpgradeSettings {
l, ok := configured.([]interface{})
if !ok || l == nil || len(l) == 0 || l[0] == nil {
return &container.UpgradeSettings{}
}
config := l[0].(map[string]interface{})

upgradeSettings := &container.UpgradeSettings{
MaxSurge: int64(config["max_surge"].(int)),
MaxUnavailable: int64(config["max_unavailable"].(int)),
Strategy: config["strategy"].(string),
BlueGreenSettings: expandBlueGreenSettings(config["blue_green_settings"]),
}

return upgradeSettings
}

func expandBlueGreenSettings(configured interface{}) *container.BlueGreenSettings {
l, ok := configured.([]interface{})
if !ok || l == nil || len(l) == 0 || l[0] == nil {
return &container.BlueGreenSettings{}
}
config := l[0].(map[string]interface{})

blueGreenSettings := &container.BlueGreenSettings{
NodePoolSoakDuration: config["node_pool_soak_duration"].(string),
StandardRolloutPolicy: expandStandardRolloutPolicy(config["standard_rollout_policy"]),
}

return blueGreenSettings
}

func expandStandardRolloutPolicy(configured interface{}) *container.StandardRolloutPolicy {
l, ok := configured.([]interface{})
if !ok || l == nil || len(l) == 0 || l[0] == nil {
return &container.StandardRolloutPolicy{}
}

config := l[0].(map[string]interface{})
standardRolloutPolicy := &container.StandardRolloutPolicy{
BatchPercentage: config["batch_percentage"].(float64),
BatchNodeCount: int64(config["batch_node_count"].(int)),
BatchSoakDuration: config["batch_soak_duration"].(string),
}

return standardRolloutPolicy
}

func expandAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig {
l := configured.([]interface{})
if len(l) == 0 {
@@ -4868,7 +4732,6 @@ func flattenAutoProvisioningDefaults(a *container.AutoprovisioningNodePoolDefaul
r["min_cpu_platform"] = a.MinCpuPlatform
r["boot_disk_kms_key"] = a.BootDiskKmsKey
r["shielded_instance_config"] = flattenShieldedInstanceConfig(a.ShieldedInstanceConfig)
r["upgrade_settings"] = flattenUpgradeSettings(a.UpgradeSettings)
r["management"] = flattenManagement(a.Management)

return []map[string]interface{}{r}
@@ -4898,44 +4761,6 @@ func flattenUpgradeOptions(a *container.AutoUpgradeOptions) []map[string]interfa
return []map[string]interface{}{r}
}

func flattenUpgradeSettings(a *container.UpgradeSettings) []map[string]interface{} {
if a == nil {
return nil
}
r := make(map[string]interface{})
r["max_surge"] = a.MaxSurge
r["max_unavailable"] = a.MaxUnavailable
r["strategy"] = a.Strategy
r["blue_green_settings"] = flattenBlueGreenSettings(a.BlueGreenSettings)

return []map[string]interface{}{r}
}

func flattenBlueGreenSettings(a *container.BlueGreenSettings) []map[string]interface{} {
if a == nil {
return nil
}

r := make(map[string]interface{})
r["node_pool_soak_duration"] = a.NodePoolSoakDuration
r["standard_rollout_policy"] = flattenStandardRolloutPolicy(a.StandardRolloutPolicy)

return []map[string]interface{}{r}
}

func flattenStandardRolloutPolicy(a *container.StandardRolloutPolicy) []map[string]interface{} {
if a == nil {
return nil
}

r := make(map[string]interface{})
r["batch_percentage"] = a.BatchPercentage
r["batch_node_count"] = a.BatchNodeCount
r["batch_soak_duration"] = a.BatchSoakDuration

return []map[string]interface{}{r}
}

func flattenMasterAuthorizedNetworksConfig(c *container.MasterAuthorizedNetworksConfig) []map[string]interface{} {
if c == nil || !c.Enabled {
return nil
@@ -5333,31 +5158,3 @@ func validateNodePoolAutoConfig(cluster *container.Cluster) error {

return nil
}

func UpgradeSettingsDiffSuppress(k, old, new string, r *schema.ResourceData) bool {
// the number of digits after decimal point may differ for soak_duration in config. ("3.5s" - "3.500s")
// suppress the difference if both floats are equal.
// max_surge and max_unavailable should not be _ideally_ specified with the strategy set to BLUE_GREEN,
// but in case it is specified, suppress it.

nodePoolSoakDuration := "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.node_pool_soak_duration"

o, n := r.GetChange("cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.strategy")

if o == "BLUE_GREEN" && n == "BLUE_GREEN" {
if k == nodePoolSoakDuration {
fs1, fs2 := r.GetChange(nodePoolSoakDuration)

f1, _ := strconv.ParseFloat(strings.Trim(fs1.(string), "s"), 32)
f2, _ := strconv.ParseFloat(strings.Trim(fs2.(string), "s"), 32)

return f1 == f2
}

return k == "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.max_surge" ||
k == "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.max_unavailable"

}

return false
}