feat(k8s): wait for pool to be ready and add 'nodes' in its output
debovema committed Feb 19, 2020
1 parent 72edd8c commit fb3593d
Showing 7 changed files with 397 additions and 31 deletions.
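As the commit title says, the cluster's default pool gains an optional wait_for_pool_ready flag and a computed nodes list. A minimal usage sketch, assuming a hypothetical resource named "example" and mirroring the pool settings of the test configuration added by this commit:

resource "scaleway_k8s_cluster_beta" "example" {
  name    = "example"
  version = "1.17.3"
  cni     = "cilium"

  default_pool {
    node_type = "gp1_xs"
    size      = 1
    min_size  = 1
    max_size  = 1
    # New in this commit: block until every node of the default pool is ready.
    wait_for_pool_ready = true
  }
}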
44 changes: 44 additions & 0 deletions scaleway/helpers_k8s.go
@@ -37,6 +37,7 @@ type KubeconfigStruct struct {
const (
K8SClusterWaitForReadyTimeout = 10 * time.Minute
K8SClusterWaitForDeletedTimeout = 10 * time.Minute
K8SPoolWaitForReadyTimeout = 10 * time.Minute
)

func k8sAPIWithRegion(d *schema.ResourceData, m interface{}) (*k8s.API, scw.Region, error) {
@@ -88,6 +89,49 @@ func waitK8SClusterDeleted(k8sAPI *k8s.API, region scw.Region, clusterID string)
return fmt.Errorf("Cluster %s has state %s, wants %s", clusterID, cluster.Status.String(), k8s.ClusterStatusDeleted.String())
}

func waitK8SPoolReady(k8sAPI *k8s.API, region scw.Region, poolID string) error {
return k8sAPI.WaitForPool(&k8s.WaitForPoolRequest{
PoolID: poolID,
Region: region,
Timeout: scw.DurationPtr(K8SPoolWaitForReadyTimeout),
})
}

// convertNodes converts a list of nodes to a list of maps
func convertNodes(res *k8s.ListNodesResponse) []map[string]interface{} {
var result []map[string]interface{}
for _, node := range res.Nodes {
n := make(map[string]interface{})
n["name"] = node.Name
n["pool_id"] = node.PoolID
n["status"] = node.Status.String()
if node.PublicIPV4 != nil && node.PublicIPV4.String() != "<nil>" {
n["public_ip"] = node.PublicIPV4.String()
}
if node.PublicIPV6 != nil && node.PublicIPV6.String() != "<nil>" {
n["public_ip_v6"] = node.PublicIPV6.String()
}
result = append(result, n)
}
return result
}

func getNodes(k8sAPI *k8s.API, pool *k8s.Pool) (interface{}, error) {
req := &k8s.ListNodesRequest{
Region: pool.Region,
ClusterID: pool.ClusterID,
PoolID: &pool.ID,
}

nodes, err := k8sAPI.ListNodes(req, scw.WithAllPages())

if err != nil {
return nil, err
}

return convertNodes(nodes), nil
}

func clusterAutoscalerConfigFlatten(cluster *k8s.Cluster) []map[string]interface{} {
autoscalerConfig := map[string]interface{}{}
autoscalerConfig["disable_scale_down"] = cluster.AutoscalerConfig.ScaleDownDisabled
134 changes: 104 additions & 30 deletions scaleway/resource_k8s_cluster_beta.go
@@ -194,6 +194,12 @@ func resourceScalewayK8SClusterBeta() *schema.Resource {
k8s.RuntimeCrio.String(),
}, false),
},
"wait_for_pool_ready": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Whether to wait for the pool to be ready",
},
// Computed elements
"pool_id": {
Type: schema.TypeString,
@@ -210,6 +216,39 @@
Computed: true,
Description: "The date and time of the last update of the default pool",
},
"nodes": {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Computed: true,
Description: "The name of the node",
},
"pool_id": {
Type: schema.TypeString,
Computed: true,
Description: "The pool ID whose the node belongs to",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "The status of the node",
},
"public_ip": {
Type: schema.TypeString,
Computed: true,
Description: "The public IPv4 address of the node",
},
"public_ip_v6": {
Type: schema.TypeString,
Computed: true,
Description: "The public IPv6 address of the node",
},
},
},
},
"status": {
Type: schema.TypeString,
Computed: true,
@@ -408,51 +447,42 @@ func resourceScalewayK8SClusterBetaCreate(d *schema.ResourceData, m interface{})

d.SetId(newRegionalId(region, res.ID))

err = waitK8SClusterReady(k8sAPI, region, res.ID)
err = waitK8SClusterReady(k8sAPI, region, res.ID) // wait for the cluster status to be ready
if err != nil {
return err
}

if d.Get("default_pool.0.wait_for_pool_ready").(bool) { // wait for the pool status to be ready (if specified)
pool, err := readDefaultPool(d, m) // ensure that 'default_pool.0.pool_id' is set
if err != nil {
return err
}

err = waitK8SPoolReady(k8sAPI, region, expandID(pool.ID))
if err != nil {
return err
}
}

return resourceScalewayK8SClusterBetaRead(d, m)
}

// resourceScalewayK8SClusterBetaDefaultPoolRead is only called after resourceScalewayK8SClusterBetaCreate,
// thus ensuring that the cluster has exactly one pool when its pools are listed
func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m interface{}) error {
k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id())
k8sAPI, region, _, err := k8sAPIWithRegionAndID(m, d.Id())
if err != nil {
return err
}

////
// Read default Pool
////

var pool *k8s.Pool

if defaultPoolID, ok := d.GetOk("default_pool.0.pool_id"); ok {
poolResp, err := k8sAPI.GetPool(&k8s.GetPoolRequest{
Region: region,
PoolID: expandID(defaultPoolID.(string)),
})
if err != nil {
return err
}
pool = poolResp
} else {
response, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{
Region: region,
ClusterID: clusterID,
})
if err != nil {
return err
}

if len(response.Pools) != 1 {
return fmt.Errorf("Newly created pool on cluster %s has %d pools instead of 1", clusterID, len(response.Pools))
}
pool, err := readDefaultPool(d, m)
if err != nil {
return err
}

pool = response.Pools[0]
nodes, err := getNodes(k8sAPI, pool)
if err != nil {
return err
}

defaultPool := map[string]interface{}{}
@@ -466,6 +496,8 @@ func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m int
defaultPool["container_runtime"] = pool.ContainerRuntime
defaultPool["created_at"] = pool.CreatedAt.String()
defaultPool["updated_at"] = pool.UpdatedAt.String()
defaultPool["nodes"] = nodes
defaultPool["wait_for_pool_ready"] = d.Get("default_pool.0.wait_for_pool_ready")
defaultPool["status"] = pool.Status.String()

if pool.PlacementGroupID != nil {
@@ -479,6 +511,41 @@ func resourceScalewayK8SClusterBetaDefaultPoolRead(d *schema.ResourceData, m int
return nil
}

func readDefaultPool(d *schema.ResourceData, m interface{}) (*k8s.Pool, error) {
k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id())
if err != nil {
return nil, err
}

var pool *k8s.Pool

if defaultPoolID, ok := d.GetOk("default_pool.0.pool_id"); ok {
poolResp, err := k8sAPI.GetPool(&k8s.GetPoolRequest{
Region: region,
PoolID: expandID(defaultPoolID.(string)),
})
if err != nil {
return nil, err
}
pool = poolResp
} else {
response, err := k8sAPI.ListPools(&k8s.ListPoolsRequest{
Region: region,
ClusterID: clusterID,
})
if err != nil {
return nil, err
}

if len(response.Pools) != 1 {
return nil, fmt.Errorf("Newly created pool on cluster %s has %d pools instead of 1", clusterID, len(response.Pools))
}

pool = response.Pools[0]
}
return pool, nil
}

func resourceScalewayK8SClusterBetaRead(d *schema.ResourceData, m interface{}) error {
k8sAPI, region, clusterID, err := k8sAPIWithRegionAndID(m, d.Id())
if err != nil {
@@ -662,6 +729,13 @@ func resourceScalewayK8SClusterBetaDefaultPoolUpdate(d *schema.ResourceData, m i
}
}
}

if d.Get("default_pool.0.wait_for_pool_ready").(bool) { // wait for the pool to be ready if specified
err = waitK8SPoolReady(k8sAPI, region, expandID(defaultPoolID))
if err != nil {
return err
}
}
}

return resourceScalewayK8SClusterBetaDefaultPoolRead(d, m)
78 changes: 78 additions & 0 deletions scaleway/resource_k8s_cluster_beta_test.go
@@ -249,6 +249,66 @@ func TestAccScalewayK8SClusterBetaDefaultPoolRecreate(t *testing.T) {
})
}

func TestAccScalewayK8SClusterBetaDefaultPoolWait(t *testing.T) {
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckScalewayK8SClusterBetaDestroy,
Steps: []resource.TestStep{
{
Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "version", "1.17.3"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "cni", "cilium"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()),
resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.node_type", "gp1_xs"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.0", "terraform-test"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.1", "scaleway_k8s_cluster_beta"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "tags.2", "default-pool"),
),
},
{
Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 2), // add a node and wait for the pool to be ready
Check: resource.ComposeTestCheckFunc(
testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()),
resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "2"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "2"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.1.status", k8s.NodeStatusReady.String()), // check that the new node has the "ready" status
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"),
),
},
{
Config: testAccCheckScalewayK8SClusterBetaConfigPoolWait("1.17.3", 1), // remove a node and wait for the pool to be ready
Check: resource.ComposeTestCheckFunc(
testAccCheckScalewayK8SClusterBetaExists("scaleway_k8s_cluster_beta.pool"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "status", k8s.ClusterStatusReady.String()),
resource.TestCheckResourceAttrSet("scaleway_k8s_cluster_beta.pool", "default_pool.0.pool_id"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.min_size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.max_size", "1"),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.status", k8s.PoolStatusReady.String()),
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.0.status", k8s.NodeStatusReady.String()),
resource.TestCheckNoResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.nodes.1"), // check that the second node does not exist anymore
resource.TestCheckResourceAttr("scaleway_k8s_cluster_beta.pool", "default_pool.0.wait_for_pool_ready", "true"),
),
},
},
})
}

func TestAccScalewayK8SClusterBetaAutoUpgrade(t *testing.T) {
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -411,6 +471,24 @@ resource "scaleway_k8s_cluster_beta" "pool" {
}`, version)
}

func testAccCheckScalewayK8SClusterBetaConfigPoolWait(version string, size int) string {
return fmt.Sprintf(`
resource "scaleway_k8s_cluster_beta" "pool" {
cni = "cilium"
version = "%s"
name = "default-pool"
default_pool {
node_type = "gp1_xs"
size = %d
min_size = 1
max_size = %d
container_runtime = "docker"
wait_for_pool_ready = true
}
tags = [ "terraform-test", "scaleway_k8s_cluster_beta", "default-pool" ]
}`, version, size, size)
}
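
The computed nodes list exposed on default_pool can then be referenced from configuration. A hypothetical output sketch (the output name is illustrative; "pool" is the cluster resource from the test configuration above, using Terraform 0.12 index syntax):

output "first_node_public_ip" {
  # public_ip is only populated when the node has a public IPv4 address.
  value = scaleway_k8s_cluster_beta.pool.default_pool[0].nodes[0].public_ip
}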

func testAccCheckScalewayK8SClusterBetaConfigPoolWithPlacementGroup(version string) string {
return fmt.Sprintf(`
resource "scaleway_instance_placement_group" "pool_placement_group" {