From 13accd2373f638f865e122f3ad04fcadb1e3004e Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Thu, 27 Jul 2023 13:28:56 +0800 Subject: [PATCH 1/3] `azurerm_kubernetes_cluster` - support for the `default_node_pool.snapshot_id` property --- .../kubernetes_cluster_other_resource_test.go | 144 ++++++++++++++++++ .../containers/kubernetes_cluster_resource.go | 1 + .../containers/kubernetes_nodepool.go | 27 ++++ .../docs/r/kubernetes_cluster.html.markdown | 2 + 4 files changed, 174 insertions(+) diff --git a/internal/services/containers/kubernetes_cluster_other_resource_test.go b/internal/services/containers/kubernetes_cluster_other_resource_test.go index 71cabd4cf6f2..7e9e44249ef4 100644 --- a/internal/services/containers/kubernetes_cluster_other_resource_test.go +++ b/internal/services/containers/kubernetes_cluster_other_resource_test.go @@ -4,13 +4,19 @@ package containers_test import ( + "context" "fmt" "strings" "testing" "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/snapshots" + "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/utils" ) func TestAccKubernetesCluster_basicAvailabilitySet(t *testing.T) { @@ -971,6 +977,67 @@ func TestAccKubernetesCluster_customCaTrustCerts(t *testing.T) { }) } +func TestAccKubernetesCluster_snapshotId(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test") + r := KubernetesClusterResource{} + + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.snapshotSource(data), + Check: acceptance.ComposeTestCheckFunc( + 
data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + client := clients.Containers.SnapshotClient + clusterId, err := commonids.ParseKubernetesClusterID(state.ID) + if err != nil { + return err + } + poolId := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ManagedClusterName, "default") + id := snapshots.NewSnapshotID(poolId.SubscriptionId, poolId.ResourceGroupName, data.RandomString) + snapshot := snapshots.Snapshot{ + Location: data.Locations.Primary, + Properties: &snapshots.SnapshotProperties{ + CreationData: &snapshots.CreationData{ + SourceResourceId: utils.String(poolId.ID()), + }, + }, + } + _, err = client.CreateOrUpdate(ctx, id, snapshot) + if err != nil { + return fmt.Errorf("creating %s: %+v", id, err) + } + return nil + }, "azurerm_kubernetes_cluster.source"), + ), + }, + { + Config: r.snapshotRestore(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.snapshotSource(data), + Check: acceptance.ComposeTestCheckFunc( + data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error { + client := clients.Containers.SnapshotClient + clusterId, err := commonids.ParseKubernetesClusterID(state.ID) + if err != nil { + return err + } + poolId := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ManagedClusterName, "default") + id := snapshots.NewSnapshotID(poolId.SubscriptionId, poolId.ResourceGroupName, data.RandomString) + _, err = client.Delete(ctx, id) + if err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + return nil + }, "azurerm_kubernetes_cluster.source"), + ), + }, + }) +} + func (KubernetesClusterResource) basicAvailabilitySetConfig(data acceptance.TestData) string { return fmt.Sprintf(` provider "azurerm" { features {} } @@ -2954,3 +3021,80 @@ 
resource "azurerm_kubernetes_cluster" "test" { } `, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, certsString) } + +func (KubernetesClusterResource) snapshotSource(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%[2]d" + location = "%[1]s" +} + +resource "azurerm_kubernetes_cluster" "source" { + name = "acctestaks%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%[2]d" + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + identity { + type = "SystemAssigned" + } +} +`, data.Locations.Primary, data.RandomInteger) +} + +func (KubernetesClusterResource) snapshotRestore(data acceptance.TestData) string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctestRG-aks-%[2]d" + location = "%[1]s" +} + +resource "azurerm_kubernetes_cluster" "source" { + name = "acctestaks%[2]d" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%[2]d" + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + } + identity { + type = "SystemAssigned" + } +} + +data "azurerm_kubernetes_node_pool_snapshot" "test" { + name = "%[3]s" + resource_group_name = azurerm_resource_group.test.name +} + +resource "azurerm_kubernetes_cluster" "test" { + name = "acctestaks%[2]dnew" + location = azurerm_resource_group.test.location + resource_group_name = azurerm_resource_group.test.name + dns_prefix = "acctestaks%[2]dnew" + default_node_pool { + name = "default" + node_count = 1 + vm_size = "Standard_DS2_v2" + snapshot_id = data.azurerm_kubernetes_node_pool_snapshot.test.id + } + identity { + type = "SystemAssigned" + } +} +`, 
data.Locations.Primary, data.RandomInteger, data.RandomString) +} diff --git a/internal/services/containers/kubernetes_cluster_resource.go b/internal/services/containers/kubernetes_cluster_resource.go index f9193fbd9569..cbbbd963182f 100644 --- a/internal/services/containers/kubernetes_cluster_resource.go +++ b/internal/services/containers/kubernetes_cluster_resource.go @@ -2373,6 +2373,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{} "default_node_pool.0.os_disk_type", "default_node_pool.0.os_sku", "default_node_pool.0.pod_subnet_id", + "default_node_pool.0.snapshot_id", "default_node_pool.0.ultra_ssd_enabled", "default_node_pool.0.vnet_subnet_id", "default_node_pool.0.vm_size", diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index 8084e4649b3a..30f17827a3af 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ b/internal/services/containers/kubernetes_nodepool.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/go-azure-sdk/resource-manager/compute/2022-03-01/proximityplacementgroups" "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/agentpools" "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/managedclusters" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/snapshots" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-azurerm/internal/features" computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate" @@ -253,6 +254,12 @@ func SchemaDefaultNodePool() *pluginsdk.Schema { }, false), }, + "snapshot_id": { + Type: pluginsdk.TypeString, + Optional: true, + ValidateFunc: snapshots.ValidateSnapshotID, + }, + "host_group_id": { Type: pluginsdk.TypeString, Optional: true, @@ -884,6 +891,14 @@ func ConvertDefaultNodePoolToAgentPool(input 
*[]managedclusters.ManagedClusterAg agentpool.Properties.WorkloadRuntime = utils.ToPtr(agentpools.WorkloadRuntime(string(*workloadRuntimeNodePool))) } + if creationData := defaultCluster.CreationData; creationData != nil { + if creationData.SourceResourceId != nil { + agentpool.Properties.CreationData = &agentpools.CreationData{ + SourceResourceId: creationData.SourceResourceId, + } + } + } + return agentpool } @@ -980,6 +995,12 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]managedclusters.Manage profile.ScaleDownMode = utils.ToPtr(managedclusters.ScaleDownMode(scaleDownMode)) } + if snapshotId := raw["snapshot_id"].(string); snapshotId != "" { + profile.CreationData = &managedclusters.CreationData{ + SourceResourceId: utils.String(snapshotId), + } + } + if ultraSSDEnabled, ok := raw["ultra_ssd_enabled"]; ok { profile.EnableUltraSSD = utils.Bool(ultraSSDEnabled.(bool)) } @@ -1381,6 +1402,11 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf scaleDownMode = *agentPool.ScaleDownMode } + snapshotId := "" + if agentPool.CreationData != nil && agentPool.CreationData.SourceResourceId != nil { + snapshotId = *agentPool.CreationData.SourceResourceId + } + vmSize := "" if agentPool.VMSize != nil { vmSize = *agentPool.VMSize @@ -1440,6 +1466,7 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf "os_disk_type": string(osDiskType), "os_sku": osSKU, "scale_down_mode": string(scaleDownMode), + "snapshot_id": snapshotId, "tags": tags.Flatten(agentPool.Tags), "temporary_name_for_rotation": temporaryName, "type": agentPoolType, diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index dae708263e09..f3172229c503 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -436,6 +436,8 @@ A `default_node_pool` block supports the following: * `scale_down_mode` - (Optional) Specifies the 
autoscaling behaviour of the Kubernetes Cluster. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. +* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this default Node Pool. + * `temporary_name_for_rotation` - (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. * `type` - (Optional) The type of Node Pool which should be created. Possible values are `AvailabilitySet` and `VirtualMachineScaleSets`. Defaults to `VirtualMachineScaleSets`. Changing this forces a new resource to be created. From 390272308f1bf4a4363d4297b1a7e5e230734916 Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Mon, 31 Jul 2023 16:46:40 +0800 Subject: [PATCH 2/3] update docs --- website/docs/r/kubernetes_cluster.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown index f3172229c503..af2941effafc 100644 --- a/website/docs/r/kubernetes_cluster.html.markdown +++ b/website/docs/r/kubernetes_cluster.html.markdown @@ -370,7 +370,7 @@ An `monitor_metrics` block supports the following: A `default_node_pool` block supports the following: --> **Note:** Changing certain properties of the `default_node_pool` is done by cycling the system node pool of the cluster. `temporary_name_for_rotation` must be specified when changing any of the following properties: `enable_host_encryption`, `enable_node_public_ip`, `kubelet_config`, `linux_os_config`, `max_pods`, `node_taints`, `only_critical_addons_enabled`, `os_disk_size_gb`, `os_disk_type`, `os_sku`, `pod_subnet_id`, `ultra_ssd_enabled`, `vnet_subnet_id`, `vm_size`, `zones`. +-> **Note:** Changing certain properties of the `default_node_pool` is done by cycling the system node pool of the cluster. 
`temporary_name_for_rotation` must be specified when changing any of the following properties: `enable_host_encryption`, `enable_node_public_ip`, `kubelet_config`, `linux_os_config`, `max_pods`, `node_taints`, `only_critical_addons_enabled`, `os_disk_size_gb`, `os_disk_type`, `os_sku`, `pod_subnet_id`, `snapshot_id`, `ultra_ssd_enabled`, `vnet_subnet_id`, `vm_size`, `zones`. * `name` - (Required) The name which should be used for the default Kubernetes Node Pool. Changing this forces a new resource to be created. @@ -436,7 +436,7 @@ A `default_node_pool` block supports the following: * `scale_down_mode` - (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`. -* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this default Node Pool. +* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this default Node Pool. `temporary_name_for_rotation` must be specified when changing this property. * `temporary_name_for_rotation` - (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing. 
From 5d2b7d894768455e918725e0b16cd63f46c8a07a Mon Sep 17 00:00:00 2001 From: Heng Lu Date: Wed, 9 Aug 2023 10:04:48 +0800 Subject: [PATCH 3/3] parse snapshot id --- internal/services/containers/kubernetes_nodepool.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/services/containers/kubernetes_nodepool.go b/internal/services/containers/kubernetes_nodepool.go index 30f17827a3af..2d32db905fc1 100644 --- a/internal/services/containers/kubernetes_nodepool.go +++ b/internal/services/containers/kubernetes_nodepool.go @@ -1404,7 +1404,11 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf snapshotId := "" if agentPool.CreationData != nil && agentPool.CreationData.SourceResourceId != nil { - snapshotId = *agentPool.CreationData.SourceResourceId + id, err := snapshots.ParseSnapshotIDInsensitively(*agentPool.CreationData.SourceResourceId) + if err != nil { + return nil, err + } + snapshotId = id.ID() } vmSize := ""