Wait until container cluster can be operated on. (#3989)
Signed-off-by: Modular Magician <[email protected]>
modular-magician authored and nat-henderson committed Jul 9, 2019
1 parent d4c1228 commit 3532ff3
Showing 1 changed file with 43 additions and 13 deletions.
google/resource_container_cluster.go
@@ -912,7 +912,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
         }
     }
 
-    return resourceContainerClusterRead(d, meta)
+    if err := resourceContainerClusterRead(d, meta); err != nil {
+        return err
+    }
+
+    if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutCreate)); err != nil {
+        return err
+    }
+    return nil
 }
 
 func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
@@ -928,21 +935,15 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    cluster := &containerBeta.Cluster{}
-    err = resource.Retry(2*time.Minute, func() *resource.RetryError {
-        name := containerClusterFullName(project, location, d.Get("name").(string))
-        cluster, err = config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
-        if err != nil {
-            return resource.NonRetryableError(err)
-        }
-        if cluster.Status != "RUNNING" {
-            return resource.RetryableError(fmt.Errorf("Cluster %q has status %q with message %q", d.Get("name"), cluster.Status, cluster.StatusMessage))
-        }
-        return nil
-    })
+    clusterName := d.Get("name").(string)
+    name := containerClusterFullName(project, location, clusterName)
+    cluster, err := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
     if err != nil {
         return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
     }
+    if cluster.Status == "ERROR" || cluster.Status == "DEGRADED" {
+        return fmt.Errorf("Cluster %q has status %q with message %q", d.Get("name"), cluster.Status, cluster.StatusMessage)
+    }
 
     d.Set("name", cluster.Name)
     if err := d.Set("network_policy", flattenNetworkPolicy(cluster.NetworkPolicy)); err != nil {
@@ -1038,6 +1039,10 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
     clusterName := d.Get("name").(string)
     timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes())
 
+    if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutUpdate)); err != nil {
+        return err
+    }
+
     d.Partial(true)
 
     lockKey := containerClusterMutexKey(project, location, clusterName)
@@ -1526,6 +1531,10 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
     clusterName := d.Get("name").(string)
     timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes())
 
+    if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutDelete)); err != nil {
+        return err
+    }
+
     log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
     mutexKV.Lock(containerClusterMutexKey(project, location, clusterName))
     defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName))
@@ -1604,6 +1613,24 @@ func cleanFailedContainerCluster(d *schema.ResourceData, meta interface{}) error {
     return nil
 }
 
+func waitForContainerClusterReady(config *Config, project, location, clusterName string, timeout time.Duration) error {
+    return resource.Retry(timeout, func() *resource.RetryError {
+        name := containerClusterFullName(project, location, clusterName)
+        cluster, err := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+        if err != nil {
+            return resource.NonRetryableError(err)
+        }
+        if cluster.Status == "PROVISIONING" || cluster.Status == "RECONCILING" || cluster.Status == "STOPPING" {
+            return resource.RetryableError(fmt.Errorf("Cluster %q has status %q with message %q", clusterName, cluster.Status, cluster.StatusMessage))
+        } else if cluster.Status == "RUNNING" {
+            log.Printf("Cluster %q has status 'RUNNING'.", clusterName)
+            return nil
+        } else {
+            return resource.NonRetryableError(fmt.Errorf("Cluster %q has terminal state %q with message %q.", clusterName, cluster.Status, cluster.StatusMessage))
+        }
+    })
+}
+
 // container engine's API currently mistakenly returns the instance group manager's
 // URL instead of the instance group's URL in its responses. This shim detects that
 // error, and corrects it, by fetching the instance group manager URL and retrieving
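Note: the new waitForContainerClusterReady helper above amounts to a three-way classification of the GKE Cluster.Status field: transitional statuses are retried, RUNNING succeeds, and any other value is treated as terminal. A minimal standalone sketch of that decision logic (classifyStatus is an illustrative name, not part of the provider):

package main

import "fmt"

// classifyStatus mirrors the branching in waitForContainerClusterReady:
// transitional statuses are polled again, RUNNING is success, and any
// other value (ERROR, DEGRADED, ...) is treated as terminal.
func classifyStatus(status string) string {
    switch status {
    case "PROVISIONING", "RECONCILING", "STOPPING":
        return "retry" // cluster is mid-operation; poll again
    case "RUNNING":
        return "ready" // safe to operate on the cluster
    default:
        return "fail" // terminal; give up immediately
    }
}

func main() {
    for _, s := range []string{"PROVISIONING", "RUNNING", "DEGRADED"} {
        fmt.Printf("%-12s -> %s\n", s, classifyStatus(s))
    }
}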
@@ -2013,6 +2040,9 @@ func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
 
     d.Set("name", clusterName)
     d.SetId(clusterName)
+    if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutCreate)); err != nil {
+        return nil, err
+    }
 
     return []*schema.ResourceData{d}, nil
 }
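Outside the provider, the same readiness gate can be reproduced against the GKE API directly. A minimal sketch, assuming Application Default Credentials, placeholder project/location/cluster values, and a plain sleep loop in place of the Terraform SDK's resource.Retry:

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    containerBeta "google.golang.org/api/container/v1beta1"
)

func main() {
    ctx := context.Background()

    // Assumes Application Default Credentials are available.
    svc, err := containerBeta.NewService(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // Same fully-qualified name format the provider builds with
    // containerClusterFullName; the values here are placeholders.
    name := fmt.Sprintf("projects/%s/locations/%s/clusters/%s",
        "my-project", "us-central1", "my-cluster")

    deadline := time.Now().Add(30 * time.Minute)
    for {
        cluster, err := svc.Projects.Locations.Clusters.Get(name).Do()
        if err != nil {
            log.Fatal(err) // API errors are non-retryable, as in the helper
        }
        switch cluster.Status {
        case "PROVISIONING", "RECONCILING", "STOPPING":
            if time.Now().After(deadline) {
                log.Fatalf("timed out waiting; last status %q", cluster.Status)
            }
            time.Sleep(10 * time.Second) // still transitioning; poll again
        case "RUNNING":
            log.Printf("cluster %s is ready", name)
            return
        default:
            log.Fatalf("terminal status %q: %s", cluster.Status, cluster.StatusMessage)
        }
    }
}

The provider's version differs mainly in plumbing: resource.Retry supplies the backoff and the per-operation timeout, and errors are wrapped as retryable or non-retryable instead of aborting the process.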