Skip to content

Commit

Permalink
added timeouts for karbon cluster (#563)
Browse files Browse the repository at this point in the history
  • Loading branch information
Abhishekism9450 authored Apr 27, 2023
1 parent 3ceb4bd commit 240a796
Show file tree
Hide file tree
Showing 2 changed files with 68 additions and 5 deletions.
16 changes: 11 additions & 5 deletions nutanix/resource_nutanix_karbon_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,11 @@ func resourceNutanixKarbonCluster() *schema.Resource {
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(DEFAULTWAITTIMEOUT * time.Minute),
Update: schema.DefaultTimeout(DEFAULTWAITTIMEOUT * time.Minute),
Delete: schema.DefaultTimeout(DEFAULTWAITTIMEOUT * time.Minute),
},
SchemaVersion: 1,
Schema: KarbonClusterResourceMap(),
}
Expand All @@ -75,6 +80,7 @@ func KarbonClusterResourceMap() map[string]*schema.Schema {
Optional: true,
Default: DEFAULTWAITTIMEOUT,
ValidateFunc: validation.IntAtLeast(MINIMUMWAITTIMEOUT),
Deprecated: "use timeouts instead",
},
"name": {
Type: schema.TypeString,
Expand Down Expand Up @@ -513,7 +519,7 @@ func resourceNutanixKarbonClusterCreate(ctx context.Context, d *schema.ResourceD
}
// Set terraform state id
d.SetId(createClusterResponse.ClusterUUID)
err = WaitForKarbonCluster(ctx, client, timeout, createClusterResponse.TaskUUID)
err = WaitForKarbonCluster(ctx, client, timeout, createClusterResponse.TaskUUID, d.Timeout(schema.TimeoutCreate))
if err != nil {
return diag.FromErr(err)
}
Expand Down Expand Up @@ -638,7 +644,7 @@ func resourceNutanixKarbonClusterUpdate(ctx context.Context, d *schema.ResourceD
if err != nil {
return diag.FromErr(err)
}
err = WaitForKarbonCluster(ctx, client, timeout, taskUUID)
err = WaitForKarbonCluster(ctx, client, timeout, taskUUID, d.Timeout(schema.TimeoutUpdate))
if err != nil {
return diag.FromErr(err)
}
Expand Down Expand Up @@ -685,7 +691,7 @@ func resourceNutanixKarbonClusterDelete(ctx context.Context, d *schema.ResourceD
if err != nil {
return diag.Errorf("error while deleting Karbon Cluster UUID(%s): %s", d.Id(), err)
}
err = WaitForKarbonCluster(ctx, client, timeout, clusterDeleteResponse.TaskUUID)
err = WaitForKarbonCluster(ctx, client, timeout, clusterDeleteResponse.TaskUUID, d.Timeout(schema.TimeoutDelete))
if err != nil {
return diag.Errorf("error while waiting for Karbon Cluster deletion with UUID(%s): %s", d.Id(), err)
}
Expand Down Expand Up @@ -984,12 +990,12 @@ func GetNodePoolsForCluster(conn *karbon.Client, karbonClusterName string, nodep
return nodepoolStructs, nil
}

func WaitForKarbonCluster(ctx context.Context, client *Client, waitTimeoutMinutes int64, taskUUID string) error {
func WaitForKarbonCluster(ctx context.Context, client *Client, waitTimeoutMinutes int64, taskUUID string, timeout time.Duration) error {
stateConf := &resource.StateChangeConf{
Pending: []string{"QUEUED", "RUNNING"},
Target: []string{"SUCCEEDED"},
Refresh: taskStateRefreshFunc(client.API, taskUUID),
Timeout: time.Duration(waitTimeoutMinutes) * time.Minute,
Timeout: timeout,
Delay: WAITDELAY,
MinTimeout: WAITMINTIMEOUT,
}
Expand Down
57 changes: 57 additions & 0 deletions website/docs/r/karbon_cluster.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,61 @@ resource "nutanix_karbon_cluster" "example_cluster" {
```


### Resource to create a Karbon cluster with custom timeouts
```hcl
resource "nutanix_karbon_cluster" "example_cluster" {
name = "example_cluster"
version = "1.18.15-1"
storage_class_config {
reclaim_policy = "Delete"
volumes_config {
file_system = "ext4"
flash_mode = false
password = "my_pe_pw"
prism_element_cluster_uuid = "my_pe_cluster_uuid"
storage_container = "my_storage_container_name"
username = "my_pe_username"
}
}
cni_config {
node_cidr_mask_size = 24
pod_ipv4_cidr = "172.20.0.0/16"
service_ipv4_cidr = "172.19.0.0/16"
}
worker_node_pool {
node_os_version = "ntnx-1.0"
num_instances = 1
ahv_config {
network_uuid = "my_subnet_id"
prism_element_cluster_uuid = "my_pe_cluster_uuid"
}
}
etcd_node_pool {
node_os_version = "ntnx-1.0"
num_instances = 1
ahv_config {
network_uuid = "my_subnet_id"
prism_element_cluster_uuid = "my_pe_cluster_uuid"
}
}
master_node_pool {
node_os_version = "ntnx-1.0"
num_instances = 1
ahv_config {
network_uuid = "my_subnet_id"
prism_element_cluster_uuid = "my_pe_cluster_uuid"
}
}
timeouts {
create = "1h"
update = "30m"
delete = "10m"
}
}
```


## Argument Reference

The following arguments are supported:
Expand Down Expand Up @@ -153,6 +208,8 @@ The `etcd_node_pool`, `master_node_pool`, `worker_node_pool` attribute supports
* `calico_config.ip_pool_config`: - (Optional) List of IP pools to be configured/managed by calico.
* `calico_config.ip_pool_config.cidr`: - (Optional) IP range to use for this pool, it should fall within pod cidr.

* `timeouts`: - (Optional) Customizes the default timeouts for the create, update, and delete operations. Each value is a duration string using "h", "m", or "s" units (for example, `"1h"`, `"30m"`).

**Note:** Updates to this attribute forces new resource creation.

See detailed information in [Nutanix Karbon Cluster](https://www.nutanix.dev/reference/karbon/api-reference/cluster/).

0 comments on commit 240a796

Please sign in to comment.