fix(k8s): only recreate default pool on its update #375

Merged (3 commits) on Dec 19, 2019
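This PR removes `ForceNew` from the `node_type`, `placement_group_id`, and `container_runtime` attributes of `default_pool`, so changing one of them now recreates only the default pool instead of the whole cluster. A minimal configuration to try the new behavior (resource name and values are illustrative, adapted from the acceptance test below); changing `node_type` from `gp1_xs` to `gp1_s` and re-applying should replace just the pool:

```hcl
resource "scaleway_k8s_cluster_beta" "demo" {
  name    = "demo-cluster"
  version = "1.17.0"
  cni     = "calico"

  default_pool {
    # Updating node_type used to force a new cluster; after this
    # change it only triggers recreation of the default pool.
    node_type = "gp1_xs"
    size      = 1
  }
}
```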
72 changes: 45 additions & 27 deletions scaleway/resource_k8s_cluster_beta.go
@@ -82,7 +82,6 @@ func resourceScalewayK8SClusterBeta() *schema.Resource {
 "node_type": {
 	Type:             schema.TypeString,
 	Required:         true,
-	ForceNew:         true,
 	Description:      "Server type of the default pool servers",
 	DiffSuppressFunc: diffSuppressFuncIgnoreCaseAndHyphen,
 },
@@ -119,14 +118,12 @@ func resourceScalewayK8SClusterBeta() *schema.Resource {
 "placement_group_id": {
 	Type:        schema.TypeString,
 	Optional:    true,
-	ForceNew:    true,
 	Default:     nil,
 	Description: "ID of the placement group for the default pool",
 },
 "container_runtime": {
 	Type:        schema.TypeString,
 	Optional:    true,
-	ForceNew:    true,
 	Default:     "docker",
 	Description: "Container runtime for the default pool",
 },
@@ -480,38 +477,50 @@ func resourceScalewayK8SClusterBetaDefaultPoolUpdate(d *schema.ResourceData, m i
 	if d.HasChange("default_pool") {
 		defaultPoolID := d.Get("default_pool.0.pool_id").(string)
 
-		updateRequest := &k8s.UpdatePoolRequest{
-			Region: region,
-			PoolID: expandID(defaultPoolID),
-		}
+		forceNew := false
+		oldPoolID := ""
+		if d.HasChange("default_pool.0.container_runtime") || d.HasChange("default_pool.0.node_type") || d.HasChange("default_pool.0.placement_group_id") {
+			forceNew = true
+			oldPoolID = defaultPoolID
+		} else {
+			updateRequest := &k8s.UpdatePoolRequest{
+				Region: region,
+				PoolID: expandID(defaultPoolID),
+			}
 
-		if autohealing, ok := d.GetOk("default_pool.0.autohealing"); ok {
-			updateRequest.Autohealing = scw.BoolPtr(autohealing.(bool))
-		}
+			if autohealing, ok := d.GetOk("default_pool.0.autohealing"); ok {
+				updateRequest.Autohealing = scw.BoolPtr(autohealing.(bool))
+			}
 
-		if minSize, ok := d.GetOk("default_pool.0.min_size"); ok {
-			updateRequest.MinSize = scw.Uint32Ptr(uint32(minSize.(int)))
-		}
+			if minSize, ok := d.GetOk("default_pool.0.min_size"); ok {
+				updateRequest.MinSize = scw.Uint32Ptr(uint32(minSize.(int)))
+			}
 
-		if maxSize, ok := d.GetOk("default_pool.0.max_size"); ok {
-			updateRequest.MaxSize = scw.Uint32Ptr(uint32(maxSize.(int)))
-		}
+			if maxSize, ok := d.GetOk("default_pool.0.max_size"); ok {
+				updateRequest.MaxSize = scw.Uint32Ptr(uint32(maxSize.(int)))
+			}
 
-		if autoscaling, ok := d.GetOk("default_pool.0.autoscaling"); ok {
-			updateRequest.Autoscaling = scw.BoolPtr(autoscaling.(bool))
-		}
+			if autoscaling, ok := d.GetOk("default_pool.0.autoscaling"); ok {
+				updateRequest.Autoscaling = scw.BoolPtr(autoscaling.(bool))
+			}
 
-		if d.Get("default_pool.0.autoscaling").(bool) == false {
-			if size, ok := d.GetOk("default_pool.0.size"); ok {
-				updateRequest.Size = scw.Uint32Ptr(uint32(size.(int)))
-			}
-		}
+			if d.Get("default_pool.0.autoscaling").(bool) == false {
+				if size, ok := d.GetOk("default_pool.0.size"); ok {
+					updateRequest.Size = scw.Uint32Ptr(uint32(size.(int)))
+				}
+			}
 
-		_, err := k8sAPI.UpdatePool(updateRequest)
-		if err != nil {
-			if !is404Error(err) {
-				return err
-			}
+			_, err := k8sAPI.UpdatePool(updateRequest)
+			if err != nil {
+				if !is404Error(err) {
+					return err
+				}
+				l.Warningf("default node pool %s is not found, recreating a new one", defaultPoolID)
+				forceNew = true
+			}
+		}
 
+		if forceNew {
 			defaultPoolRequest := &k8s.CreatePoolRequest{
 				Region:    region,
 				ClusterID: clusterID,
@@ -546,6 +555,15 @@ func resourceScalewayK8SClusterBetaDefaultPoolUpdate(d *schema.ResourceData, m i
 
 			d.Set("default_pool", []map[string]interface{}{defaultPool})
 
+			if oldPoolID != "" {
+				_, err = k8sAPI.DeletePool(&k8s.DeletePoolRequest{
+					Region: region,
+					PoolID: expandID(oldPoolID),
+				})
+				if err != nil {
+					return err
+				}
+			}
 		}
 	}

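As the update path above shows, a pure capacity change goes through `UpdatePool`, and `size` is only sent when `autoscaling` is disabled. A sketch of a pool that delegates sizing to the autoscaler instead (values are illustrative; `min_size` and `max_size` bound the autoscaler as documented below):

```hcl
resource "scaleway_k8s_cluster_beta" "autoscaled" {
  name    = "autoscaled-cluster"
  version = "1.17.0"
  cni     = "calico"

  default_pool {
    node_type   = "gp1_xs"
    size        = 1 # ignored on update while autoscaling is enabled
    autoscaling = true
    min_size    = 1
    max_size    = 5
  }
}
```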
38 changes: 38 additions & 0 deletions scaleway/resource_k8s_cluster_beta_test.go
@@ -225,6 +225,30 @@ func TestAccScalewayK8SClusterBetaDefaultPoolWithPlacementGroup(t *testing.T) {
 	})
 }
 
+func TestAccScalewayK8SClusterBetaDefaultPoolRecreate(t *testing.T) {
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckScalewayK8SClusterBetaDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCheckScalewayK8SClusterBetaDefaultPoolRecreate("gp1_xs"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.recreate_pool"),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.recreate_pool", "status", k8s.ClusterStatusReady.String()),
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.recreate_pool", "default_pool.0.node_type", "gp1_xs"),
+				),
+			},
+			{
+				Config: testAccCheckScalewayK8SClusterBetaDefaultPoolRecreate("gp1_s"),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.recreate_pool", "default_pool.0.node_type", "gp1_s"),
+				),
+			},
+		},
+	})
+}
+
 func testAccCheckScalewayK8SClusterBetaDestroy(s *terraform.State) error {
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "scaleway_k8s_cluster_beta" {
@@ -370,3 +394,17 @@ resource "scaleway_k8s_cluster_beta" "pool_placement_group" {
 	tags = [ "terraform-test", "scaleway_k8s_cluster_beta", "default-pool-placement-group" ]
 }`, version)
 }
+
+func testAccCheckScalewayK8SClusterBetaDefaultPoolRecreate(nodeType string) string {
+	return fmt.Sprintf(`
+resource "scaleway_k8s_cluster_beta" "recreate_pool" {
+	cni = "calico"
+	version = "1.17.0"
+	name = "default-pool"
+	default_pool {
+		node_type = "%s"
+		size = 1
+	}
+	tags = [ "terraform-test", "scaleway_k8s_cluster_beta", "recreate-pool" ]
+}`, nodeType)
+}
4 changes: 3 additions & 1 deletion website/docs/r/k8s_cluster_beta.html.markdown
@@ -121,7 +121,7 @@ The following arguments are supported:
 - `default_pool` - (Required) The cluster's default pool configuration.
 
 - `node_type` - (Required) The commercial type of the default pool instances.
-~> **Important:** Updates to this field will recreate a new resource.
+~> **Important:** Updates to this field will recreate a new default pool.
 
 - `size` - (Required) The size of the default pool.
@@ -130,13 +130,15 @@ The following arguments are supported:
 - `max_size` - (Defaults to `size`) The maximum size of the default pool, used by the autoscaling feature.
 
 - `placement_group_id` - (Optional) The [placement group](https://developers.scaleway.com/en/products/instance/api/#placement-groups-d8f653) the nodes of the pool will be attached to.
+~> **Important:** Updates to this field will recreate a new default pool.
 
 - `autoscaling` - (Defaults to `false`) Enables the autoscaling feature for the default pool.
 ~> **Important:** When enabled, an update of the `size` will not be taken into account.
 
 - `autohealing` - (Defaults to `false`) Enables the autohealing feature for the default pool.
 
 - `container_runtime` - (Defaults to `docker`) The container runtime of the default pool.
+~> **Important:** Updates to this field will recreate a new default pool.
 
 - `region` - (Defaults to [provider](../index.html#region) `region`) The [region](../guides/regions_and_zones.html#regions) in which the cluster should be created.
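Taken together, the documentation now flags three `default_pool` fields as pool-recreating rather than cluster-recreating. An illustrative configuration touching all three (the placement group ID below is a hypothetical placeholder):

```hcl
resource "scaleway_k8s_cluster_beta" "runtime_example" {
  name    = "runtime-example"
  version = "1.17.0"
  cni     = "calico"

  default_pool {
    node_type         = "gp1_xs"
    size              = 2
    container_runtime = "docker"
    # placement_group_id = "11111111-1111-1111-1111-111111111111" # hypothetical ID
    # Changing node_type, container_runtime, or placement_group_id
    # recreates the default pool, not the cluster.
  }
}
```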