azurerm_kubernetes_cluster - support for the default_node_pool.snapshot_id property #22708

Merged
@@ -4,13 +4,19 @@
package containers_test

import (
"context"
"fmt"
"strings"
"testing"

"github.com/hashicorp/go-azure-helpers/resourcemanager/commonids"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/agentpools"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/snapshots"
"github.com/hashicorp/terraform-plugin-testing/terraform"
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance"
"github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check"
"github.com/hashicorp/terraform-provider-azurerm/internal/clients"
"github.com/hashicorp/terraform-provider-azurerm/utils"
)

func TestAccKubernetesCluster_basicAvailabilitySet(t *testing.T) {
@@ -971,6 +977,67 @@ func TestAccKubernetesCluster_customCaTrustCerts(t *testing.T) {
})
}

func TestAccKubernetesCluster_snapshotId(t *testing.T) {
data := acceptance.BuildTestData(t, "azurerm_kubernetes_cluster", "test")
r := KubernetesClusterResource{}

data.ResourceTest(t, r, []acceptance.TestStep{
{
Config: r.snapshotSource(data),
Check: acceptance.ComposeTestCheckFunc(
data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error {
client := clients.Containers.SnapshotClient
clusterId, err := commonids.ParseKubernetesClusterID(state.ID)
if err != nil {
return err
}
poolId := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ManagedClusterName, "default")
id := snapshots.NewSnapshotID(poolId.SubscriptionId, poolId.ResourceGroupName, data.RandomString)
snapshot := snapshots.Snapshot{
Location: data.Locations.Primary,
Properties: &snapshots.SnapshotProperties{
CreationData: &snapshots.CreationData{
SourceResourceId: utils.String(poolId.ID()),
},
},
}
_, err = client.CreateOrUpdate(ctx, id, snapshot)
if err != nil {
return fmt.Errorf("creating %s: %+v", id, err)
}
return nil
}, "azurerm_kubernetes_cluster.source"),
),
},
{
Config: r.snapshotRestore(data),
Check: acceptance.ComposeTestCheckFunc(
check.That(data.ResourceName).ExistsInAzure(r),
),
},
data.ImportStep(),
{
Config: r.snapshotSource(data),
Check: acceptance.ComposeTestCheckFunc(
data.CheckWithClientForResource(func(ctx context.Context, clients *clients.Client, state *terraform.InstanceState) error {
client := clients.Containers.SnapshotClient
clusterId, err := commonids.ParseKubernetesClusterID(state.ID)
if err != nil {
return err
}
poolId := agentpools.NewAgentPoolID(clusterId.SubscriptionId, clusterId.ResourceGroupName, clusterId.ManagedClusterName, "default")
id := snapshots.NewSnapshotID(poolId.SubscriptionId, poolId.ResourceGroupName, data.RandomString)
_, err = client.Delete(ctx, id)
if err != nil {
return fmt.Errorf("creating %s: %+v", id, err)
}
return nil
}, "azurerm_kubernetes_cluster.source"),
),
},
})
}

func (KubernetesClusterResource) basicAvailabilitySetConfig(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
@@ -2954,3 +3021,80 @@ resource "azurerm_kubernetes_cluster" "test" {
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, certsString)
}

func (KubernetesClusterResource) snapshotSource(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%[2]d"
location = "%[1]s"
}

resource "azurerm_kubernetes_cluster" "source" {
name = "acctestaks%[2]d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[2]d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
}
identity {
type = "SystemAssigned"
}
}
`, data.Locations.Primary, data.RandomInteger)
}

func (KubernetesClusterResource) snapshotRestore(data acceptance.TestData) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}

resource "azurerm_resource_group" "test" {
name = "acctestRG-aks-%[2]d"
location = "%[1]s"
}

resource "azurerm_kubernetes_cluster" "source" {
name = "acctestaks%[2]d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[2]d"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
}
identity {
type = "SystemAssigned"
}
}

data "azurerm_kubernetes_node_pool_snapshot" "test" {
name = "%[3]s"
resource_group_name = azurerm_resource_group.test.name
}

resource "azurerm_kubernetes_cluster" "test" {
name = "acctestaks%[2]dnew"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
dns_prefix = "acctestaks%[2]dnew"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
snapshot_id = data.azurerm_kubernetes_node_pool_snapshot.test.id
}
identity {
type = "SystemAssigned"
}
}
`, data.Locations.Primary, data.RandomInteger, data.RandomString)
}
@@ -2373,6 +2373,7 @@ func resourceKubernetesClusterUpdate(d *pluginsdk.ResourceData, meta interface{}
"default_node_pool.0.os_disk_type",
"default_node_pool.0.os_sku",
"default_node_pool.0.pod_subnet_id",
"default_node_pool.0.snapshot_id",
"default_node_pool.0.ultra_ssd_enabled",
"default_node_pool.0.vnet_subnet_id",
"default_node_pool.0.vm_size",
27 changes: 27 additions & 0 deletions internal/services/containers/kubernetes_nodepool.go
@@ -18,6 +18,7 @@ import (
"github.com/hashicorp/go-azure-sdk/resource-manager/compute/2022-03-01/proximityplacementgroups"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/agentpools"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/managedclusters"
"github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2023-04-02-preview/snapshots"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-provider-azurerm/internal/features"
computeValidate "github.com/hashicorp/terraform-provider-azurerm/internal/services/compute/validate"
@@ -253,6 +254,12 @@ func SchemaDefaultNodePool() *pluginsdk.Schema {
}, false),
},

"snapshot_id": {
Type: pluginsdk.TypeString,
Optional: true,
ValidateFunc: snapshots.ValidateSnapshotID,
},

"host_group_id": {
Type: pluginsdk.TypeString,
Optional: true,
@@ -884,6 +891,14 @@ func ConvertDefaultNodePoolToAgentPool(input *[]managedclusters.ManagedClusterAg
agentpool.Properties.WorkloadRuntime = utils.ToPtr(agentpools.WorkloadRuntime(string(*workloadRuntimeNodePool)))
}

if creationData := defaultCluster.CreationData; creationData != nil {
if creationData.SourceResourceId != nil {
agentpool.Properties.CreationData = &agentpools.CreationData{
SourceResourceId: creationData.SourceResourceId,
}
}
}

return agentpool
}

@@ -980,6 +995,12 @@ func ExpandDefaultNodePool(d *pluginsdk.ResourceData) (*[]managedclusters.Manage
profile.ScaleDownMode = utils.ToPtr(managedclusters.ScaleDownMode(scaleDownMode))
}

if snapshotId := raw["snapshot_id"].(string); snapshotId != "" {
profile.CreationData = &managedclusters.CreationData{
SourceResourceId: utils.String(snapshotId),
}
}

if ultraSSDEnabled, ok := raw["ultra_ssd_enabled"]; ok {
profile.EnableUltraSSD = utils.Bool(ultraSSDEnabled.(bool))
}
@@ -1381,6 +1402,11 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf
scaleDownMode = *agentPool.ScaleDownMode
}

snapshotId := ""
if agentPool.CreationData != nil && agentPool.CreationData.SourceResourceId != nil {
snapshotId = *agentPool.CreationData.SourceResourceId

Review comment (Member): We should parse this ID insensitively before setting it to state.

Reply (Contributor, Author): Thanks! I've updated it.

}
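
The follow-up change isn't included in this commit. As a rough sketch only, the block above might be reworked along these lines, assuming the generated snapshots package exposes ParseSnapshotIDInsensitively (go-azure-sdk's usual ID-helper pattern) and that FlattenDefaultNodePool returns an error alongside its result:

	snapshotId := ""
	if agentPool.CreationData != nil && agentPool.CreationData.SourceResourceId != nil {
		// parse the API-returned ID case-insensitively so that casing differences
		// don't surface as a perpetual diff, then store the normalised ID in state
		parsedId, err := snapshots.ParseSnapshotIDInsensitively(*agentPool.CreationData.SourceResourceId)
		if err != nil {
			return nil, err
		}
		snapshotId = parsedId.ID()
	}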

vmSize := ""
if agentPool.VMSize != nil {
vmSize = *agentPool.VMSize
Expand Down Expand Up @@ -1440,6 +1466,7 @@ func FlattenDefaultNodePool(input *[]managedclusters.ManagedClusterAgentPoolProf
"os_disk_type": string(osDiskType),
"os_sku": osSKU,
"scale_down_mode": string(scaleDownMode),
"snapshot_id": snapshotId,
"tags": tags.Flatten(agentPool.Tags),
"temporary_name_for_rotation": temporaryName,
"type": agentPoolType,
2 changes: 2 additions & 0 deletions website/docs/r/kubernetes_cluster.html.markdown
@@ -436,6 +436,8 @@ A `default_node_pool` block supports the following:

* `scale_down_mode` - (Optional) Specifies the autoscaling behaviour of the Kubernetes Cluster. Allowed values are `Delete` and `Deallocate`. Defaults to `Delete`.

* `snapshot_id` - (Optional) The ID of the Snapshot which should be used to create this default Node Pool.

Review comment: Does it support a rolling update? Or will the node pool be recreated on `snapshot_id` change?

Reply (Contributor, Author): Hi @js-315385995, yes, it can be updated, but the update is done by cycling the system node pool of the cluster; `temporary_name_for_rotation` must be specified when changing it. I've updated the doc as well.

* `temporary_name_for_rotation` - (Optional) Specifies the name of the temporary node pool used to cycle the default node pool for VM resizing.

* `type` - (Optional) The type of Node Pool which should be created. Possible values are `AvailabilitySet` and `VirtualMachineScaleSets`. Defaults to `VirtualMachineScaleSets`. Changing this forces a new resource to be created.