From fb3593df321ef43d0184f010876b8c1317328f3a Mon Sep 17 00:00:00 2001 From: Mathieu Debove Date: Wed, 19 Feb 2020 13:06:14 +0100 Subject: [PATCH] feat(k8s): wait for pool to be ready and add 'nodes' in its output --- scaleway/helpers_k8s.go | 44 ++++++ scaleway/resource_k8s_cluster_beta.go | 134 ++++++++++++++---- scaleway/resource_k8s_cluster_beta_test.go | 78 ++++++++++ scaleway/resource_k8s_pool_beta.go | 67 ++++++++- scaleway/resource_k8s_pool_beta_test.go | 87 ++++++++++++ website/docs/r/k8s_cluster_beta.html.markdown | 9 ++ website/docs/r/k8s_pool_beta.html.markdown | 9 ++ 7 files changed, 397 insertions(+), 31 deletions(-) diff --git a/scaleway/helpers_k8s.go b/scaleway/helpers_k8s.go index 4bdc912540..8d43fe4215 100644 --- a/scaleway/helpers_k8s.go +++ b/scaleway/helpers_k8s.go @@ -37,6 +37,7 @@ type KubeconfigStruct struct { const ( K8SClusterWaitForReadyTimeout = 10 * time.Minute K8SClusterWaitForDeletedTimeout = 10 * time.Minute + K8SPoolWaitForReadyTimeout = 10 * time.Minute ) func k8sAPIWithRegion(d *schema.ResourceData, m interface{}) (*k8s.API, scw.Region, error) { @@ -88,6 +89,49 @@ func waitK8SClusterDeleted(k8sAPI *k8s.API, region scw.Region, clusterID string) return fmt.Errorf("Cluster %s has state %s, wants %s", clusterID, cluster.Status.String(), k8s.ClusterStatusDeleted.String()) } +func waitK8SPoolReady(k8sAPI *k8s.API, region scw.Region, poolID string) error { + return k8sAPI.WaitForPool(&k8s.WaitForPoolRequest{ + PoolID: poolID, + Region: region, + Timeout: scw.DurationPtr(K8SPoolWaitForReadyTimeout), + }) +} + +// convert a list of nodes to a list of map +func convertNodes(res *k8s.ListNodesResponse) []map[string]interface{} { + var result []map[string]interface{} + for _, node := range res.Nodes { + n := make(map[string]interface{}) + n["name"] = node.Name + n["pool_id"] = node.PoolID + n["status"] = node.Status.String() + if node.PublicIPV4 != nil && node.PublicIPV4.String() != "" { + n["public_ip"] = node.PublicIPV4.String() + } + if node.PublicIPV6 != nil && node.PublicIPV6.String() != "" { + n["public_ip_v6"] = node.PublicIPV6.String() + } + result = append(result, n) + } + return result +} + +func getNodes(k8sAPI *k8s.API, pool *k8s.Pool) (interface{}, error) { + req := &k8s.ListNodesRequest{ + Region: pool.Region, + ClusterID: pool.ClusterID, + PoolID: &pool.ID, + } + + nodes, err := k8sAPI.ListNodes(req, scw.WithAllPages()) + + if err != nil { + return nil, err + } + + return convertNodes(nodes), nil +} + func clusterAutoscalerConfigFlatten(cluster *k8s.Cluster) []map[string]interface{} { autoscalerConfig := map[string]interface{}{} autoscalerConfig["disable_scale_down"] = cluster.AutoscalerConfig.ScaleDownDisabled diff --git a/scaleway/resource_k8s_cluster_beta.go b/scaleway/resource_k8s_cluster_beta.go index f65eb939f6..4c0880fa90 100644 --- a/scaleway/resource_k8s_cluster_beta.go +++ b/scaleway/resource_k8s_cluster_beta.go @@ -194,6 +194,12 @@ func resourceScalewayK8SClusterBeta() *schema.Resource { k8s.RuntimeCrio.String(), }, false), }, + "wait_for_pool_ready": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to wait for the pool to be ready", + }, // Computed elements "pool_id": { Type: schema.TypeString, @@ -210,6 +216,39 @@ func resourceScalewayK8SClusterBeta() *schema.Resource { Computed: true, Description: "The date and time of the last update of the default pool", }, + "nodes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: 
schema.TypeString, + Computed: true, + Description: "The name of the node", + }, + "pool_id": { + Type: schema.TypeString, + Computed: true, + Description: "The pool ID whose the node belongs to", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the node", + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + Description: "The public IPv4 address of the node", + }, + "public_ip_v6": { + Type: schema.TypeString, + Computed: true, + Description: "The public IPv6 address of the node", + }, + }, + }, + }, "status": { Type: schema.TypeString, Computed: true, @@ -408,51 +447,42 @@ func resourceScalewayK8SClusterBetaCreate(d *schema.ResourceData, m interface{}) d.SetId(newRegionalId(region, res.ID)) - err = waitK8SClusterReady(k8sAPI, region, res.ID) + err = waitK8SClusterReady(k8sAPI, region, res.ID) // wait for the cluster status to be ready if err != nil { return err } + if d.Get("default_pool.0.wait_for_pool_ready").(bool) { // wait for the pool status to be ready (if specified) + pool, err := readDefaultPool(d, m) // ensure that 'default_pool.0.pool_id' is set + if err != nil { + return err + } + + err = waitK8SPoolReady(k8sAPI, region, expandID(pool.ID)) + if err != nil { + return err + } + } + return resourceScalewayK8SClusterBetaRead(d, m) } // resourceScalewayK8SClusterBetaDefaultPoolRead is only called after a resourceScalewayK8SClusterBetaCreate // thus ensuring the uniqueness of the only pool listed func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m interface{}) error { - k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id()) + k8sAPI, region, _, err := k8sAPIWithRegionAndID(m, d.Id()) if err != nil { return err } - //// - // Read default Pool - //// - - var pool *k8s.Pool - - if defaultPoolID, ok := d.GetOk("default_pool.0.pool_id"); ok { - poolResp, err := k8sAPI.GetPool(&k8s.GetPoolRequest{ - Region: region, - PoolID: expandID(defaultPoolID.(string)), - }) - if err != nil { - return err - } - pool = poolResp - } else { - response, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{ - Region: region, - ClusterID: clusterID, - }) - if err != nil { - return err - } - - if len(response.Pools) != 1 { - return fmt.Errorf("Newly created pool on cluster %s has %d pools instead of 1", clusterID, len(response.Pools)) - } + pool, err := readDefaultPool(d, m) + if err != nil { + return err + } - pool = response.Pools[0] + nodes, err := getNodes(k8sAPI, pool) + if err != nil { + return err } defaultPool := map[string]interface{}{} @@ -466,6 +496,8 @@ func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m int defaultPool["container_runtime"] = pool.ContainerRuntime defaultPool["created_at"] = pool.CreatedAt.String() defaultPool["updated_at"] = pool.UpdatedAt.String() + defaultPool["nodes"] = nodes + defaultPool["wait_for_pool_ready"] = d.Get("default_pool.0.wait_for_pool_ready") defaultPool["status"] = pool.Status.String() if pool.PlacementGroupID != nil { @@ -479,6 +511,41 @@ func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m int return nil } +func readDefaultPool(d *schema.ResourceData, m interface{}) (*k8s.Pool, error) { + k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id()) + if err != nil { + return nil, err + } + + var pool *k8s.Pool + + if defaultPoolID, ok := d.GetOk("default_pool.0.pool_id"); ok { + poolResp, err := k8sAPI.GetPool(&k8s.GetPoolRequest{ + Region: region, + PoolID: expandID(defaultPoolID.(string)), + }) + if err != nil { 
+ return nil, err + } + pool = poolResp + } else { + response, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{ + Region: region, + ClusterID: clusterID, + }) + if err != nil { + return nil, err + } + + if len(response.Pools) != 1 { + return nil, fmt.Errorf("Newly created pool on cluster %s has %d pools instead of 1", clusterID, len(response.Pools)) + } + + pool = response.Pools[0] + } + return pool, nil +} + func resourceScalewayK8SClusterBetaRead(d *schema.ResourceData, m interface{}) error { k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id()) if err != nil { @@ -662,6 +729,13 @@ func resourceScalewayK8SClusterBetaDefaultPoolUpdate(d *schema.ResourceData, m i } } } + + if d.Get("default_pool.0.wait_for_pool_ready").(bool) { // wait for the pool to be ready if specified + err = waitK8SPoolReady(k8sAPI, region, expandID(defaultPoolID)) + if err != nil { + return err + } + } } return resourceScalewayK8SClusterBetaDefaultPoolRead(d, m) diff --git a/scaleway/resource_k8s_cluster_beta_test.go b/scaleway/resource_k8s_cluster_beta_test.go index 981e75a2c5..beba34ec85 100644 --- a/scaleway/resource_k8s_cluster_beta_test.go +++ b/scaleway/resource_k8s_cluster_beta_test.go @@ -249,6 +249,66 @@ func TestAccScalewayK8SClusterBetaDefaultPoolRecreate(t *testing.T) { }) } +func TestAccScalewayK8SClusterBetaDefaultPoolWait(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayK8SClusterBetaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "version", "1.17.3"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "cni", "cilium"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()), + resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.node_type", "gp1_xs"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.0", "terraform-test"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.1", "scaleway_k8s_cluster_beta"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.2", "default-pool"), + ), + }, + { + Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 2), // add a node and wait for the pool to be ready + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", 
k8s.ClusterStatusReady.String()), + resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "2"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "2"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.1.status", k8s.NodeStatusReady.String()), // check that the new node has the "ready" status + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"), + ), + }, + { + Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 1), // remove a node and wait for the pool to be ready + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()), + resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()), + resource.TestCheckNoResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.1"), // check that the second node does not exist anymore + resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"), + ), + }, + }, + }) +} + func TestAccScalewayK8SClusterBetaAutoUpgrade(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -411,6 +471,24 @@ resource "scaleway_k8s_cluster_beta" "pool" { }`, version) } +func testAccCheckScalewayK8SClusterBetaConfigPoolWait(version string, size int) string { + return fmt.Sprintf(` +resource "scaleway_k8s_cluster_beta" "pool" { + cni = "cilium" + version = "%s" + name = "default-pool" + default_pool { + node_type = "gp1_xs" + size = %d + min_size = 1 + max_size = %d + container_runtime = "docker" + wait_for_pool_ready = true + } + tags = [ "terraform-test", "scaleway_k8s_cluster_beta", "default-pool" ] +}`, version, size, size) +} + func testAccCheckScalewayK8SClusterBetaConfigPoolWithPlacementGroup(version string) string { return fmt.Sprintf(` resource "scaleway_instance_placement_group" "pool_placement_group" { diff --git a/scaleway/resource_k8s_pool_beta.go b/scaleway/resource_k8s_pool_beta.go index cdfd7b986e..6865e2bcf8 100644 --- a/scaleway/resource_k8s_pool_beta.go +++ b/scaleway/resource_k8s_pool_beta.go @@ -77,6 +77,12 @@ func resourceScalewayK8SPoolBeta() *schema.Resource { k8s.RuntimeCrio.String(), }, false), }, + "wait_for_pool_ready": { + Type: schema.TypeBool, + Optional: true, + 
Default: false, + Description: "Whether to wait for the pool to be ready", + }, "placement_group_id": { Type: schema.TypeString, Optional: true, @@ -101,6 +107,44 @@ func resourceScalewayK8SPoolBeta() *schema.Resource { Computed: true, Description: "The Kubernetes version of the pool", }, + "nodes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the node", + }, + "pool_id": { + Type: schema.TypeString, + Computed: true, + Description: "The pool ID whose the node belongs to", + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the node", + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + Description: "The public IPv4 address of the node", + }, + "public_ip_v6": { + Type: schema.TypeString, + Computed: true, + Description: "The public IPv6 address of the node", + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + Description: "The status of the pool", + }, }, } } @@ -150,6 +194,13 @@ func resourceScalewayK8SPoolBetaCreate(d *schema.ResourceData, m interface{}) er d.SetId(newRegionalId(region, res.ID)) + if d.Get("wait_for_pool_ready").(bool) { // wait for nodes to be ready if specified + err = waitK8SPoolReady(k8sAPI, region, res.ID) + if err != nil { + return err + } + } + return resourceScalewayK8SPoolBetaRead(d, m) } @@ -174,6 +225,11 @@ func resourceScalewayK8SPoolBetaRead(d *schema.ResourceData, m interface{}) erro return err } + nodes, err := getNodes(k8sAPI, pool) + if err != nil { + return err + } + _ = d.Set("cluster_id", newRegionalId(region, pool.ClusterID)) _ = d.Set("name", pool.Name) _ = d.Set("node_type", pool.NodeType) @@ -186,6 +242,8 @@ func resourceScalewayK8SPoolBetaRead(d *schema.ResourceData, m interface{}) erro _ = d.Set("container_runtime", pool.ContainerRuntime) _ = d.Set("created_at", pool.CreatedAt) _ = d.Set("updated_at", pool.UpdatedAt) + _ = d.Set("nodes", nodes) + _ = d.Set("status", pool.Status) if pool.PlacementGroupID != nil { _ = d.Set("placement_group_id", newZonedIdFromRegion(region, *pool.PlacementGroupID)) // TODO fix this ZonedIdFromRegion @@ -228,11 +286,18 @@ func resourceScalewayK8SPoolBetaUpdate(d *schema.ResourceData, m interface{}) er updateRequest.Size = scw.Uint32Ptr(uint32(d.Get("size").(int))) } - _, err = k8sAPI.UpdatePool(updateRequest) + res, err := k8sAPI.UpdatePool(updateRequest) if err != nil { return err } + if d.Get("wait_for_pool_ready").(bool) { // wait for nodes to be ready if specified + err = waitK8SPoolReady(k8sAPI, region, res.ID) + if err != nil { + return err + } + } + return resourceScalewayK8SPoolBetaRead(d, m) } diff --git a/scaleway/resource_k8s_pool_beta_test.go b/scaleway/resource_k8s_pool_beta_test.go index 0a61276fb3..ee5e51302e 100644 --- a/scaleway/resource_k8s_pool_beta_test.go +++ b/scaleway/resource_k8s_pool_beta_test.go @@ -45,6 +45,63 @@ func TestAccScalewayK8SClusterPoolMinimal(t *testing.T) { }) } +func TestAccScalewayK8SClusterPoolWait(t *testing.T) { + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayK8SClusterBetaDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckScalewayK8SPoolBetaConfigWait("1.17.3", false, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.minimal"), + ), + }, + { + Config: 
testAccCheckScalewayK8SPoolBetaConfigWait("1.17.3", true, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.minimal"), + testAccCheckScalewayK8SPoolBetaExists("scaleway_k8s_pool_beta.minimal"), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "status", k8s.PoolStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "nodes.0.status", k8s.NodeStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "wait_for_pool_ready", "true"), + ), + }, + { + Config: testAccCheckScalewayK8SPoolBetaConfigWait("1.17.3", true, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.minimal"), + testAccCheckScalewayK8SPoolBetaExists("scaleway_k8s_pool_beta.minimal"), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "size", "2"), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "status", k8s.PoolStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "nodes.0.status", k8s.NodeStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "nodes.1.status", k8s.NodeStatusReady.String()), // check that the new node has the "ready" status + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "wait_for_pool_ready", "true"), + ), + }, + { + Config: testAccCheckScalewayK8SPoolBetaConfigWait("1.17.3", true, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.minimal"), + testAccCheckScalewayK8SPoolBetaExists("scaleway_k8s_pool_beta.minimal"), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "size", "1"), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "status", k8s.PoolStatusReady.String()), + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "nodes.0.status", k8s.NodeStatusReady.String()), + resource.TestCheckNoResourceAttr("scaleway_k8s_pool_beta.minimal", "nodes.1"), // check that the second node does not exist anymore + resource.TestCheckResourceAttr("scaleway_k8s_pool_beta.minimal", "wait_for_pool_ready", "true"), + ), + }, + { + Config: testAccCheckScalewayK8SPoolBetaConfigWait("1.17.3", false, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.minimal"), + testAccCheckScalewayK8SPoolBetaDestroy, + ), + }, + }, + }) +} func TestAccScalewayK8SClusterPoolPlacementGroup(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -147,6 +204,36 @@ resource "scaleway_k8s_cluster_beta" "minimal" { %s`, version, pool) } +func testAccCheckScalewayK8SPoolBetaConfigWait(version string, otherPool bool, otherPoolSize int) string { + pool := "" + if otherPool { + pool += fmt.Sprintf(` +resource "scaleway_k8s_pool_beta" "minimal" { + name = "minimal" + cluster_id = scaleway_k8s_cluster_beta.minimal.id + node_type = "gp1_xs" + size = %d + min_size = 1 + max_size = %d + + wait_for_pool_ready = true +}`, otherPoolSize, otherPoolSize) + } + + return fmt.Sprintf(` +resource "scaleway_k8s_cluster_beta" "minimal" { + name = "minimal" + cni = "calico" + version = "%s" + default_pool { + node_type = "gp1_xs" + size = 1 + } + tags = [ "terraform-test", "scaleway_k8s_cluster_beta", "minimal" ] +} +%s`, version, pool) +} + func 
testAccCheckScalewayK8SPoolBetaConfigPlacementGroup(version string) string {
 	return fmt.Sprintf(`
 resource "scaleway_instance_placement_group" "placement_group" {
diff --git a/website/docs/r/k8s_cluster_beta.html.markdown b/website/docs/r/k8s_cluster_beta.html.markdown
index 833acbf1a5..8eb6bfa47d 100644
--- a/website/docs/r/k8s_cluster_beta.html.markdown
+++ b/website/docs/r/k8s_cluster_beta.html.markdown
@@ -151,6 +151,8 @@ The following arguments are supported:
   - `container_runtime` - (Defaults to `docker`) The container runtime of the default pool.
   ~> **Important:** Updates to this field will recreate a new default pool.
 
+  - `wait_for_pool_ready` - (Defaults to `false`) Whether to wait for the pool to be ready.
+
 - `region` - (Defaults to [provider](../index.html#region) `region`) The [region](../guides/regions_and_zones.html#regions) in which the cluster should be created.
 
 - `organization_id` - (Defaults to [provider](../index.html#organization_id) `organization_id`) The ID of the organization the cluster is associated with.
@@ -173,6 +175,13 @@ In addition to all above arguments, the following attributes are exported:
 - `status` - The status of the Kubernetes cluster.
 - `default_pool`
   - `pool_id` - The ID of the default pool.
+  - `status` - The status of the default pool.
+  - `nodes` - (List of) The nodes in the default pool.
+    - `name` - The name of the node.
+    - `pool_id` - The ID of the pool.
+    - `public_ip` - The public IPv4 address of the node.
+    - `public_ip_v6` - The public IPv6 address of the node.
+    - `status` - The status of the node.
   - `created_at` - The creation date of the default pool.
   - `updated_at` - The last update date of the default pool.
   - `upgrade_available` - Set to `true` if a newer Kubernetes version is available.
diff --git a/website/docs/r/k8s_pool_beta.html.markdown b/website/docs/r/k8s_pool_beta.html.markdown
index 0bbbba551c..89f4763238 100644
--- a/website/docs/r/k8s_pool_beta.html.markdown
+++ b/website/docs/r/k8s_pool_beta.html.markdown
@@ -67,11 +67,20 @@ The following arguments are supported:
 
 - `region` - (Defaults to [provider](../index.html#region) `region`) The [region](../guides/regions_and_zones.html#regions) in which the pool should be created.
 
+- `wait_for_pool_ready` - (Defaults to `false`) Whether to wait for the pool to be ready.
+
 ## Attributes Reference
 
 In addition to all above arguments, the following attributes are exported:
 
 - `id` - The ID of the pool.
+- `status` - The status of the pool.
+- `nodes` - (List of) The nodes in the pool.
+  - `name` - The name of the node.
+  - `pool_id` - The ID of the pool.
+  - `public_ip` - The public IPv4 address of the node.
+  - `public_ip_v6` - The public IPv6 address of the node.
+  - `status` - The status of the node.
 - `created_at` - The creation date of the pool.
 - `updated_at` - The last update date of the pool.
 - `version` - The version of the pool.
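
For illustration only, a minimal Terraform configuration exercising the new `wait_for_pool_ready` argument and the exported `nodes` list could look like the sketch below. It is not part of the patch: the resource label, cluster version and `gp1_xs` node type simply mirror the acceptance tests above, and the output assumes Terraform 0.12 attribute syntax.

```hcl
resource "scaleway_k8s_cluster_beta" "example" {
  name    = "wait-for-pool-example"
  version = "1.17.3"
  cni     = "cilium"

  default_pool {
    node_type = "gp1_xs"
    size      = 1

    # Block during create/update until every node of the default pool reports "ready".
    wait_for_pool_ready = true
  }
}

# Once the pool is ready, node details are available through the computed `nodes` list.
output "first_node_public_ip" {
  value = scaleway_k8s_cluster_beta.example.default_pool[0].nodes[0].public_ip
}
```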