azurerm_kubernetes_cluster - support for the default_node_pool.snapshot_id property #22708

Merged
@@ -4,13 +4,19 @@
package containers_test

import (
"context"
"fmt"
"strings"
"testing"

"github.com/hashicorp/go-azure-helpers/resourcemanager/commonids"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/agentpools"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/snapshots"
"github.com/hashicorp/terraform-plugin-testing/terraform"
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance"
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)

func TestAccKubernetesCluster_basicAvailabilitySet(t *testing.T) {
@@ -972,6 +978,67 @@ func TestAccKubernetesCluster_customCaTrustCerts(t *testing.T) {
})
}

func TestAccKubernetesCluster_snapshotId(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
r := KubernetesClusterResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
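// step 1: create the source cluster, then snapshot its default node pool directly via the SDK client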
{
Config: r.snapshotSource(data),
Check: acceptance.ComposeTestCheckFunc(
data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error {
client := clients.Containers.SnapshotClient
clusterId, err := commonids.ParseKubernetesClusterID(state.ID)
if err != nil {
return err
}
poolId := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ManagedClusterName, "default")
id := snapshots.NewSnapshotID(poolId.SubscriptionId, poolId.ResourceGroupName, data.RandomString)
snapshot := snapshots.Snapshot{
Location: data.Locations.Primary,
Properties: &snapshots.SnapshotProperties{
CreationData: &snapshots.CreationData{
SourceResourceId: utils.String(poolId.ID()),
},
},
}
_, err = client.CreateOrUpdate(ctx, id, snapshot)
if err != nil {
return fmt.Errorf("creating %s: %+v", id, err)
}
return nil
}, "azurerm_kubernetes_cluster.source"),
),
},
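// step 2: create a second cluster whose default node pool is restored from the snapshot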
{
Config: r.snapshotRestore(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep(),
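// step 3: re-apply the source-only config and delete the snapshot via the SDK so everything can be cleaned up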
{
Config: r.snapshotSource(data),
Check: acceptance.ComposeTestCheckFunc(
data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error {
client := clients.Containers.SnapshotClient
clusterId, err := commonids.ParseKubernetesClusterID(state.ID)
if err != nil {
return err
}
poolId := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ManagedClusterName, "default")
id := snapshots.NewSnapshotID(poolId.SubscriptionId, poolId.ResourceGroupName, data.RandomString)
_, err = client.Delete(ctx, id)
if err != nil {
return fmt.Errorf("deleting %s: %+v", id, err)
}
return nil
}, "azurerm_kubernetes_cluster.source"),
),
},
})
}

func (KubernetesClusterResource) basicAvailabilitySetConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
@@ -2955,3 +3022,80 @@ resource "azurerm_kubernetes_cluster" "test" {
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, certsString)
}

func (KubernetesClusterResource) snapshotSource(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%[2]d"
location = "%[1]s"
}

resource "azurerm_kubernetes_cluster" "source" {
name = "acctestaks%[2]d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[2]d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
}
identity {
type = "SystemAssigned"
}
}
`, data.Locations.Primary, data.RandomInteger)
}

func (KubernetesClusterResource) snapshotRestore(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%[2]d"
location = "%[1]s"
}

resource "azurerm_kubernetes_cluster" "source" {
name = "acctestaks%[2]d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[2]d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
}
identity {
type = "SystemAssigned"
}
}

data "azurerm_kubernetes_node_pool_snapshot" "test" {
name = "%[3]s"
resource_group_name = azurerm_resource_group.test.name
}

resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%[2]dnew"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[2]dnew"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
snapshot_id = data.azurerm_kubernetes_node_pool_snapshot.test.id
}
identity {
type = "SystemAssigned"
}
}
`, data.Locations.Primary, data.RandomInteger, data.RandomString)
}
@@ -2393,6 +2393,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{}
"default_node_pool.0.os_disk_type",
"default_node_pool.0.os_sku",
"default_node_pool.0.pod_subnet_id",
"default_node_pool.0.snapshot_id",
"default_node_pool.0.ultra_ssd_enabled",
"default_node_pool.0.vnet_subnet_id",
"default_node_pool.0.vm_size",
31 changes: 31 additions & 0 deletions internal/services/containers/kubernetes_nodepool.go
@@ -18,6 +18,7 @@ import (
"github.com/hashicorp/go-azure-sdk/resource-manager/compute/2022-03-01/proximityplacementgroups"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/agentpools"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/managedclusters"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/snapshots"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-provider-azurerm/internal/features"
computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate"
@@ -253,6 +254,12 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
}, false),
},

"snapshot_id": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: snapshots.ValidateSnapshotID,
},

"host_group_id": {
Type: pluginsdk.TypeString,
Optional: true,
@@ -901,6 +908,14 @@ func ConvertDefaultNodePoolToAgentPool(input *[]managedclusters.ManagedClusterAg
agentpool.Properties.WorkloadRuntime = utils.ToPtr(agentpools.WorkloadRuntime(string(*workloadRuntimeNodePool)))
}

// carry any snapshot-based CreationData across when converting the default node pool profile into an agent pool
if creationData := defaultCluster.CreationData; creationData != nil {
if creationData.SourceResourceId != nil {
agentpool.Properties.CreationData = &agentpools.CreationData{
SourceResourceId: creationData.SourceResourceId,
}
}
}

return agentpool
}

@@ -997,6 +1012,12 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]managedclusters.Manage
profile.ScaleDownMode = utils.ToPtr(managedclusters.ScaleDownMode(scaleDownMode))
}

// a configured snapshot_id is passed to the API as CreationData on the node pool profile
if snapshotId := raw["snapshot_id"].(string); snapshotId != "" {
profile.CreationData = &managedclusters.CreationData{
SourceResourceId: utils.String(snapshotId),
}
}

if ultraSSDEnabled, ok := raw["ultra_ssd_enabled"]; ok {
profile.EnableUltraSSD = utils.Bool(ultraSSDEnabled.(bool))
}
@@ -1398,6 +1419,15 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf
scaleDownMode = *agentPool.ScaleDownMode
}

// the snapshot ID returned by the API may use different casing, so parse it case-insensitively and emit the canonical form
snapshotId := ""
if agentPool.CreationData != nil && agentPool.CreationData.SourceResourceId != nil {
id, err := snapshots.ParseSnapshotIDInsensitively(*agentPool.CreationData.SourceResourceId)
if err != nil {
return nil, err
}
snapshotId = id.ID()
}

vmSize := ""
if agentPool.VMSize != nil {
vmSize = *agentPool.VMSize
@@ -1457,6 +1487,7 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf
"os_disk_type": string(osDiskType),
"os_sku": osSKU,
"scale_down_mode": string(scaleDownMode),
"snapshot_id": snapshotId,
"tags": tags.Flatten(agentPool.Tags),
"temporary_name_for_rotation": temporaryName,
"type": agentPoolType,
4 changes: 3 additions & 1 deletion website/docs/r/kubernetes_cluster.html.markdown
@@ -370,7 +370,7 @@ An `monitor_metrics` block supports the following:

A `default_node_pool` block supports the following:

-> **Note:** Changing certain properties of the `default_node_pool` is done by cycling the system node pool of the cluster. When cycling the system node pool, it doesn't perform cordon and drain, and it will disrupt rescheduling pods currently running on the previous system node pool.`temporary_name_for_rotation` must be specified when changing any of the following properties: `enable_host_encryption`, `enable_node_public_ip`, `kubelet_config`, `linux_os_config`, `max_pods`, `node_taints`, `only_critical_addons_enabled`, `os_disk_size_gb`, `os_disk_type`, `os_sku`, `pod_subnet_id`, `ultra_ssd_enabled`, `vnet_subnet_id`, `vm_size`, `zones`.
-> **Note:** Changing certain properties of the `default_node_pool` is done by cycling the system node pool of the cluster. When cycling the system node pool, it doesn't perform cordon and drain, and it will disrupt rescheduling pods currently running on the previous system node pool. `temporary_name_for_rotation` must be specified when changing any of the following properties: `enable_host_encryption`, `enable_node_public_ip`, `kubelet_config`, `linux_os_config`, `max_pods`, `node_taints`, `only_critical_addons_enabled`, `os_disk_size_gb`, `os_disk_type`, `os_sku`, `pod_subnet_id`, `snapshot_id`, `ultra_ssd_enabled`, `vnet_subnet_id`, `vm_size`, `zones`.

* `name` - (Required) The name which should be used for the default Kubernetes Node Pool. Changing this forces a new resource to be created.

Expand Down Expand Up @@ -436,6 +436,8 @@ A `default_node_pool` block supports the following:

* `scale_down_mode` - (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.

* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. See the usage sketch after this list.

* `temporary_name_for_rotation` - (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing.

* `type` - (Optional) The type of Node Pool which should be created. Possible values are `AvailabilitySet` and `VirtualMachineScaleSets`. Defaults to `VirtualMachineScaleSets`. Changing this forces a new resource to be created.
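
As a usage illustration, here is a minimal sketch adapted from the acceptance-test configuration in this PR (the names, resource group, and location are placeholders): an existing Node Pool Snapshot is looked up through the `azurerm_kubernetes_node_pool_snapshot` data source and passed to `snapshot_id` on the default node pool.

```hcl
# assumes a Node Pool Snapshot named "example-snapshot" already exists in "example-resources"
data "azurerm_kubernetes_node_pool_snapshot" "example" {
  name                = "example-snapshot"
  resource_group_name = "example-resources"
}

resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = "West Europe"
  resource_group_name = "example-resources"
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"

    # restore the default node pool from the existing snapshot
    snapshot_id = data.azurerm_kubernetes_node_pool_snapshot.example.id
  }

  identity {
    type = "SystemAssigned"
  }
}
```

In the acceptance test above, the snapshot itself is created out-of-band through the SDK client; the Terraform configuration only reads it via the data source.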