kubernetes_cluster/node_pool- capacity_reservation_group_id (#17395)
qiqingzhang authored Jul 6, 2022
1 parent 3044df9 commit 84e5363
Showing 6 changed files with 226 additions and 29 deletions.
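In configuration terms, this commit lets the cluster's `default_node_pool` block (and, as shown further down, the standalone `azurerm_kubernetes_cluster_node_pool` resource) pin its nodes to an existing Capacity Reservation Group. Below is a minimal sketch condensed from the acceptance-test configs in this diff — resource names are illustrative, and the resource group, capacity reservation, identity and role-assignment plumbing shown in the tests is omitted here:

```hcl
resource "azurerm_capacity_reservation_group" "example" {
  name                = "example-crg"
  resource_group_name = azurerm_resource_group.example.name
  location            = azurerm_resource_group.example.location
}

resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    # The VM size should match the SKU reserved inside the group's capacity reservation.
    vm_size                       = "Standard_D2s_v3"
    # ForceNew: changing this recreates the node pool.
    capacity_reservation_group_id = azurerm_capacity_reservation_group.example.id
  }

  identity {
    # The acceptance tests use a user-assigned identity with access to the
    # reservation group; see the role-assignment sketch after the docs diff below.
    type         = "UserAssigned"
    identity_ids = [azurerm_user_assigned_identity.example.id]
  }
}
```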
@@ -81,6 +81,13 @@ func resourceKubernetesClusterNodePool() *pluginsdk.Resource {
},

// Optional
"capacity_reservation_group_id": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: computeValidate.CapacityReservationGroupID,
},

"enable_auto_scaling": {
Type: pluginsdk.TypeBool,
Optional: true,
@@ -461,6 +468,10 @@ func resourceKubernetesClusterNodePoolCreate(d *pluginsdk.ResourceData, meta int
profile.VnetSubnetID = utils.String(vnetSubnetID)
}

if capacityReservationGroupId := d.Get("capacity_reservation_group_id").(string); capacityReservationGroupId != "" {
profile.CapacityReservationGroupID = utils.String(capacityReservationGroupId)
}

maxCount := d.Get("max_count").(int)
minCount := d.Get("min_count").(int)

@@ -827,6 +838,7 @@ func resourceKubernetesClusterNodePoolRead(d *pluginsdk.ResourceData, meta inter

d.Set("vnet_subnet_id", props.VnetSubnetID)
d.Set("vm_size", props.VMSize)
d.Set("capacity_reservation_group_id", props.CapacityReservationGroupID)

if err := d.Set("upgrade_settings", flattenUpgradeSettings(props.UpgradeSettings)); err != nil {
return fmt.Errorf("setting `upgrade_settings`: %+v", err)
@@ -97,6 +97,21 @@ func TestAccKubernetesClusterNodePool_availabilityZones(t *testing.T) {
})
}

func TestAccKubernetesClusterNodePool_capacityReservationGroup(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
r := KubernetesClusterNodePoolResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.capacityReservationGroup(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep(),
})
}

func TestAccKubernetesClusterNodePool_errorForAvailabilitySet(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster_node_pool", "test")
r := KubernetesClusterNodePoolResource{}
@@ -1118,6 +1133,77 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger)
}

func (KubernetesClusterNodePoolResource) capacityReservationGroup(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%[1]d"
location = "%[2]s"
}
resource "azurerm_capacity_reservation_group" "test" {
name = "acctest-ccrg-%[1]d"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}
resource "azurerm_capacity_reservation" "test" {
name = "acctest-ccr-%[1]d"
capacity_reservation_group_id = azurerm_capacity_reservation_group.test.id
sku {
name = "Standard_D2s_v3"
capacity = 2
}
}
resource "azurerm_user_assigned_identity" "test" {
name = "acctest%[1]d"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}
resource "azurerm_role_assignment" "test" {
scope = azurerm_capacity_reservation_group.test.id
principal_id = azurerm_user_assigned_identity.test.principal_id
role_definition_name = "Owner"
}
resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%[1]d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[1]d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2s_v3"
capacity_reservation_group_id = azurerm_capacity_reservation.test.capacity_reservation_group_id
}
identity {
type = "UserAssigned"
identity_ids = [azurerm_user_assigned_identity.test.id]
}
depends_on = [
azurerm_capacity_reservation.test,
azurerm_role_assignment.test
]
}
resource "azurerm_kubernetes_cluster_node_pool" "test" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
vm_size = "Standard_D2s_v3"
node_count = 1
capacity_reservation_group_id = azurerm_capacity_reservation.test.capacity_reservation_group_id
}
`, data.RandomInteger, data.Locations.Primary)
}

func (r KubernetesClusterNodePoolResource) manualScaleConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
@@ -464,6 +464,21 @@ func TestAccKubernetesCluster_basicMaintenanceConfig(t *testing.T) {
})
}

func TestAccKubernetesCluster_capacityReservationGroup(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
r := KubernetesClusterResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.capacityReservationGroup(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep(),
})
}

func TestAccKubernetesCluster_completeMaintenanceConfig(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
r := KubernetesClusterResource{}
@@ -1731,6 +1746,70 @@ resource "azurerm_kubernetes_cluster" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger)
}

func (KubernetesClusterResource) capacityReservationGroup(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%[1]d"
location = "%[2]s"
}
resource "azurerm_capacity_reservation_group" "test" {
name = "acctest-ccrg-%[1]d"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}
resource "azurerm_capacity_reservation" "test" {
name = "acctest-ccr-%[1]d"
capacity_reservation_group_id = azurerm_capacity_reservation_group.test.id
sku {
name = "Standard_D2s_v3"
capacity = 2
}
}
resource "azurerm_user_assigned_identity" "test" {
name = "acctest%[1]d"
resource_group_name = azurerm_resource_group.test.name
location = azurerm_resource_group.test.location
}
resource "azurerm_role_assignment" "test" {
scope = azurerm_capacity_reservation_group.test.id
principal_id = azurerm_user_assigned_identity.test.principal_id
role_definition_name = "Owner"
}
resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%[1]d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[1]d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2s_v3"
capacity_reservation_group_id = azurerm_capacity_reservation.test.capacity_reservation_group_id
}
identity {
type = "UserAssigned"
identity_ids = [azurerm_user_assigned_identity.test.id]
}
depends_on = [
azurerm_capacity_reservation.test,
azurerm_role_assignment.test
]
}
`, data.RandomInteger, data.Locations.Primary)
}

func (KubernetesClusterResource) completeMaintenanceConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
74 changes: 45 additions & 29 deletions internal/services/containers/kubernetes_nodepool.go
@@ -54,6 +54,13 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
ValidateFunc: validation.StringIsNotEmpty,
},

"capacity_reservation_group_id": {
Type: pluginsdk.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: computeValidate.CapacityReservationGroupID,
},

// TODO 4.0: change this from enable_* to *_enabled
"enable_auto_scaling": {
Type: pluginsdk.TypeBool,
@@ -709,6 +716,10 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]containerservice.Manag
profile.ProximityPlacementGroupID = utils.String(proximityPlacementGroupId)
}

if capacityReservationGroupId := raw["capacity_reservation_group_id"].(string); capacityReservationGroupId != "" {
profile.CapacityReservationGroupID = utils.String(capacityReservationGroupId)
}

count := raw["node_count"].(int)
maxCount := raw["max_count"].(int)
minCount := raw["min_count"].(int)
@@ -1055,6 +1066,10 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
if agentPool.VMSize != nil {
vmSize = *agentPool.VMSize
}
capacityReservationGroupId := ""
if agentPool.CapacityReservationGroupID != nil {
capacityReservationGroupId = *agentPool.CapacityReservationGroupID
}

upgradeSettings := flattenUpgradeSettings(agentPool.UpgradeSettings)
linuxOSConfig, err := flattenAgentPoolLinuxOSConfig(agentPool.LinuxOSConfig)
@@ -1063,35 +1078,36 @@ func FlattenDefaultNodePool(input *[]containerservice.ManagedClusterAgentPoolPro
}

out := map[string]interface{}{
"enable_auto_scaling": enableAutoScaling,
"enable_node_public_ip": enableNodePublicIP,
"enable_host_encryption": enableHostEncryption,
"fips_enabled": enableFIPS,
"kubelet_disk_type": string(agentPool.KubeletDiskType),
"max_count": maxCount,
"max_pods": maxPods,
"min_count": minCount,
"name": name,
"node_count": count,
"node_labels": nodeLabels,
"node_public_ip_prefix_id": nodePublicIPPrefixID,
"node_taints": []string{},
"os_disk_size_gb": osDiskSizeGB,
"os_disk_type": string(osDiskType),
"os_sku": string(agentPool.OsSKU),
"tags": tags.Flatten(agentPool.Tags),
"type": string(agentPool.Type),
"ultra_ssd_enabled": enableUltraSSD,
"vm_size": vmSize,
"pod_subnet_id": podSubnetId,
"orchestrator_version": orchestratorVersion,
"proximity_placement_group_id": proximityPlacementGroupId,
"upgrade_settings": upgradeSettings,
"vnet_subnet_id": vnetSubnetId,
"only_critical_addons_enabled": criticalAddonsEnabled,
"kubelet_config": flattenAgentPoolKubeletConfig(agentPool.KubeletConfig),
"linux_os_config": linuxOSConfig,
"zones": zones.Flatten(agentPool.AvailabilityZones),
"enable_auto_scaling": enableAutoScaling,
"enable_node_public_ip": enableNodePublicIP,
"enable_host_encryption": enableHostEncryption,
"fips_enabled": enableFIPS,
"kubelet_disk_type": string(agentPool.KubeletDiskType),
"max_count": maxCount,
"max_pods": maxPods,
"min_count": minCount,
"name": name,
"node_count": count,
"node_labels": nodeLabels,
"node_public_ip_prefix_id": nodePublicIPPrefixID,
"node_taints": []string{},
"os_disk_size_gb": osDiskSizeGB,
"os_disk_type": string(osDiskType),
"os_sku": string(agentPool.OsSKU),
"tags": tags.Flatten(agentPool.Tags),
"type": string(agentPool.Type),
"ultra_ssd_enabled": enableUltraSSD,
"vm_size": vmSize,
"pod_subnet_id": podSubnetId,
"orchestrator_version": orchestratorVersion,
"proximity_placement_group_id": proximityPlacementGroupId,
"upgrade_settings": upgradeSettings,
"vnet_subnet_id": vnetSubnetId,
"only_critical_addons_enabled": criticalAddonsEnabled,
"kubelet_config": flattenAgentPoolKubeletConfig(agentPool.KubeletConfig),
"linux_os_config": linuxOSConfig,
"zones": zones.Flatten(agentPool.AvailabilityZones),
"capacity_reservation_group_id": capacityReservationGroupId,
}

return &[]interface{}{
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
@@ -312,6 +312,8 @@ A `default_node_pool` block supports the following:

* `vm_size` - (Required) The size of the Virtual Machine, such as `Standard_DS2_v2`. Changing this forces a new resource to be created.

* `capacity_reservation_group_id` - (Optional) Specifies the ID of the Capacity Reservation Group within which this AKS Cluster should be created. Changing this forces a new resource to be created.

* `enable_auto_scaling` - (Optional) Should [the Kubernetes Auto Scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler) be enabled for this Node Pool? Defaults to `false`.

-> **Note:** This requires that the `type` is set to `VirtualMachineScaleSets`.
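One detail worth noting from the acceptance tests above: the cluster is given a user-assigned identity that is granted access to the Capacity Reservation Group before the cluster is created, and the cluster `depends_on` that role assignment. A minimal sketch of that prerequisite, reusing the illustrative `example` names from the earlier snippet (the tests grant `Owner`; whether a narrower built-in role would suffice is not established by this diff):

```hcl
resource "azurerm_user_assigned_identity" "example" {
  name                = "example-aks-identity"
  resource_group_name = azurerm_resource_group.example.name
  location            = azurerm_resource_group.example.location
}

# Grant the cluster's identity access to the reservation group so AKS can
# allocate nodes against it; the acceptance tests use the Owner role.
resource "azurerm_role_assignment" "example" {
  scope                = azurerm_capacity_reservation_group.example.id
  principal_id         = azurerm_user_assigned_identity.example.principal_id
  role_definition_name = "Owner"
}
```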
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -71,6 +71,8 @@ The following arguments are supported:

---

* `capacity_reservation_group_id` - (Optional) Specifies the ID of the Capacity Reservation Group where this Node Pool should exist. Changing this forces a new resource to be created.

* `enable_auto_scaling` - (Optional) Whether to enable [auto-scaler](https://docs.microsoft.com/azure/aks/cluster-autoscaler). Defaults to `false`.

* `enable_host_encryption` - (Optional) Should the nodes in this Node Pool have host encryption enabled? Defaults to `false`.
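For a standalone node pool, the same argument is set on `azurerm_kubernetes_cluster_node_pool`. A minimal sketch, assuming the existing cluster and reservation group from the examples above:

```hcl
resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  # Should match the SKU reserved in the group's capacity reservation.
  vm_size    = "Standard_D2s_v3"
  node_count = 1

  # Must reference a Capacity Reservation Group the cluster's identity can use;
  # ForceNew, so changing this recreates the node pool.
  capacity_reservation_group_id = azurerm_capacity_reservation_group.example.id
}
```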
