r/kubernetes_cluster: autoscaling-related improvements #4256

Closed

90 changes: 50 additions & 40 deletions azurerm/resource_arm_kubernetes_cluster.go
@@ -679,7 +679,7 @@ func resourceArmKubernetesClusterCreate(d *schema.ResourceData, meta interface{}
kubernetesVersion := d.Get("kubernetes_version").(string)

linuxProfile := expandKubernetesClusterLinuxProfile(d)
agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d)
agentProfiles, err := expandKubernetesClusterAgentPoolProfiles([]containerservice.ManagedClusterAgentPoolProfile{}, d)
if err != nil {
return err
}
@@ -788,7 +788,16 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}
kubernetesVersion := d.Get("kubernetes_version").(string)

linuxProfile := expandKubernetesClusterLinuxProfile(d)
agentProfiles, err := expandKubernetesClusterAgentPoolProfiles(d)

read, err := client.Get(ctx, resourceGroup, name)
if err != nil {
return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}
agentProfiles := []containerservice.ManagedClusterAgentPoolProfile{}
if read.ManagedClusterProperties != nil && read.ManagedClusterProperties.AgentPoolProfiles != nil {
agentProfiles = *read.ManagedClusterProperties.AgentPoolProfiles
}
agentProfiles, err = expandKubernetesClusterAgentPoolProfiles(agentProfiles, d)
if err != nil {
return err
}
@@ -840,11 +849,6 @@ func resourceArmKubernetesClusterUpdate(d *schema.ResourceData, meta interface{}
return fmt.Errorf("Error waiting for update of Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

read, err := client.Get(ctx, resourceGroup, name)
if err != nil {
return fmt.Errorf("Error retrieving Managed Kubernetes Cluster %q (Resource Group %q): %+v", name, resourceGroup, err)
}

if read.ID == nil {
return fmt.Errorf("Cannot read ID for Managed Kubernetes Cluster %q (Resource Group %q)", name, resourceGroup)
}
@@ -1183,73 +1187,79 @@ func flattenKubernetesClusterAddonProfiles(profile map[string]*containerservice.
return []interface{}{values}
}

func expandKubernetesClusterAgentPoolProfiles(d *schema.ResourceData) ([]containerservice.ManagedClusterAgentPoolProfile, error) {
func expandKubernetesClusterAgentPoolProfiles(profiles []containerservice.ManagedClusterAgentPoolProfile, d *schema.ResourceData) ([]containerservice.ManagedClusterAgentPoolProfile, error) {
configs := d.Get("agent_pool_profile").([]interface{})

profiles := make([]containerservice.ManagedClusterAgentPoolProfile, 0)
for config_id := range configs {
// If the given list of profiles is smaller than the configured list, append an empty profile and fill it in later
if len(profiles) <= config_id {
profiles = append(profiles, containerservice.ManagedClusterAgentPoolProfile{})
}
config := configs[config_id].(map[string]interface{})

name := config["name"].(string)
poolType := config["type"].(string)
count := int32(config["count"].(int))
vmSize := config["vm_size"].(string)
osDiskSizeGB := int32(config["os_disk_size_gb"].(int))
osType := config["os_type"].(string)
profiles[config_id].Name = utils.String(config["name"].(string))
profiles[config_id].Type = containerservice.AgentPoolType(config["type"].(string))

profile := containerservice.ManagedClusterAgentPoolProfile{
Name: utils.String(name),
Type: containerservice.AgentPoolType(poolType),
Count: utils.Int32(count),
VMSize: containerservice.VMSizeTypes(vmSize),
OsDiskSizeGB: utils.Int32(osDiskSizeGB),
OsType: containerservice.OSType(osType),
// If the count field is not configured, don't reset it, since that triggers a pool update.
// If we update the pool, the previous value will be used.
if config["count"] != nil {
profiles[config_id].Count = utils.Int32(int32(config["count"].(int)))
}
profiles[config_id].VMSize = containerservice.VMSizeTypes(config["vm_size"].(string))
profiles[config_id].OsDiskSizeGB = utils.Int32(int32(config["os_disk_size_gb"].(int)))
profiles[config_id].OsType = containerservice.OSType(config["os_type"].(string))

if maxPods := int32(config["max_pods"].(int)); maxPods > 0 {
profile.MaxPods = utils.Int32(maxPods)
profiles[config_id].MaxPods = utils.Int32(maxPods)
}

vnetSubnetID := config["vnet_subnet_id"].(string)
if vnetSubnetID != "" {
profile.VnetSubnetID = utils.String(vnetSubnetID)
profiles[config_id].VnetSubnetID = utils.String(vnetSubnetID)
}

if maxCount := int32(config["max_count"].(int)); maxCount > 0 {
profile.MaxCount = utils.Int32(maxCount)
maxCount := int32(config["max_count"].(int))
if maxCount > 0 {
profiles[config_id].MaxCount = utils.Int32(maxCount)
}

if minCount := int32(config["min_count"].(int)); minCount > 0 {
profile.MinCount = utils.Int32(minCount)
minCount := int32(config["min_count"].(int))
if minCount > 0 {
profiles[config_id].MinCount = utils.Int32(minCount)
}

if enableAutoScalingItf := config["enable_auto_scaling"]; enableAutoScalingItf != nil {
profile.EnableAutoScaling = utils.Bool(enableAutoScalingItf.(bool))
profiles[config_id].EnableAutoScaling = utils.Bool(enableAutoScalingItf.(bool))
}

// Auto scaling will change the number of nodes, but the original count number should not be sent again.
// This avoids the cluster being resized after creation.
if *profile.EnableAutoScaling && !d.IsNewResource() {
profile.Count = nil
}
// If the configuration disables autoscaling, reset the MaxCount and MinCount fields received from the API so we can gracefully disable it.
// However, if max_count or min_count is defined in the configuration, we throw an error later, since that is an invalid user configuration.
if !*profiles[config_id].EnableAutoScaling && (profiles[config_id].MaxCount != nil && maxCount <= 0) {
profiles[config_id].MaxCount = nil
}
if !*profiles[config_id].EnableAutoScaling && (profiles[config_id].MinCount != nil && minCount <= 0) {
profiles[config_id].MinCount = nil
}

if availavilityZones := utils.ExpandStringSlice(config["availability_zones"].([]interface{})); len(*availavilityZones) > 0 {
profile.AvailabilityZones = availavilityZones
profiles[config_id].AvailabilityZones = availavilityZones
}

if *profile.EnableAutoScaling && (profile.MinCount == nil || profile.MaxCount == nil) {
// TODO this validation should be done before we attempt to create/modify the cluster
if *profiles[config_id].EnableAutoScaling && (profiles[config_id].MinCount == nil || profiles[config_id].MaxCount == nil) {
return nil, fmt.Errorf("Can't create an AKS cluster with autoscaling enabled but not setting min_count or max_count")
}
if !*profiles[config_id].EnableAutoScaling && (profiles[config_id].MinCount != nil || profiles[config_id].MaxCount != nil) {
return nil, fmt.Errorf("Can't create an AKS cluster with autoscaling disabled with set min_count or max_count settings")
}

if nodeTaints := utils.ExpandStringSlice(config["node_taints"].([]interface{})); len(*nodeTaints) > 0 {
profile.NodeTaints = nodeTaints
profiles[config_id].NodeTaints = nodeTaints
}

if enableNodePublicIP := config["enable_node_public_ip"]; enableNodePublicIP != nil {
profile.EnableNodePublicIP = utils.Bool(enableNodePublicIP.(bool))
profiles[config_id].EnableNodePublicIP = utils.Bool(enableNodePublicIP.(bool))
}

profiles = append(profiles, profile)
}

return profiles, nil
11 changes: 11 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
@@ -156,6 +156,17 @@ A `agent_pool_profile` block supports the following:

* `count` - (Optional) Number of Agents (VMs) in the Pool. Possible values must be in the range of 1 to 100 (inclusive). Defaults to `1`.

~> **NOTE:** When the `enable_auto_scaling` parameter is set to `true`, this value will change dynamically depending on the cluster's load. It is recommended to ignore changes to this field when autoscaling is enabled, since autoscaled clusters cannot be scaled manually. This can be done by adding the following `lifecycle` block to the resource for each `agent_pool_profile` with autoscaling enabled:
```
resource "azurerm_kubernetes_cluster" "test" {
...
lifecycle {
ignore_changes = [
"agent_pool_profile.0.count"
]
}
}
```
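
For reference, a minimal sketch of an `agent_pool_profile` block with autoscaling enabled is shown below; the pool name, VM size, and node counts are placeholder values, and a `VirtualMachineScaleSets` pool type is assumed.
```
agent_pool_profile {
  name                = "autoscaled"              # placeholder pool name
  type                = "VirtualMachineScaleSets" # assumed pool type
  vm_size             = "Standard_DS2_v2"         # placeholder VM size
  count               = 1                         # initial node count, managed by the autoscaler afterwards
  enable_auto_scaling = true
  min_count           = 1
  max_count           = 5
}
```
Combined with the `lifecycle` block above, Terraform should then leave the autoscaler-managed `count` untouched on subsequent plans.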
* `vm_size` - (Required) The size of each VM in the Agent Pool (e.g. `Standard_F1`). Changing this forces a new resource to be created.

* `availability_zones` - (Optional) Availability zones for nodes. The property `type` of the `agent_pool_profile` must be set to `VirtualMachineScaleSets` in order to use availability zones.
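
As a sketch only (the zone numbers and pool settings below are assumed, not prescribed), a pool spread across availability zones could be configured along these lines:
```
agent_pool_profile {
  name               = "zonedpool"                # placeholder pool name
  type               = "VirtualMachineScaleSets"  # required for availability zones
  vm_size            = "Standard_DS2_v2"          # placeholder VM size
  count              = 3
  availability_zones = ["1", "2", "3"]            # assumed zones; actual availability varies by region
}
```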