Add support for Google Cloud Bigtable replication #23

Merged · 1 commit · Oct 23, 2018
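What the diff below amounts to (my summary, not text from the PR): the resource now models clusters exclusively through a repeatable `cluster` block, capped at two clusters, which is what enables replication; the deprecated top-level `cluster_id`, `zone`, `num_nodes`, and `storage_type` fields flip from `Deprecated` to `Removed`; and the `customdiff`-based ForceNew simulation is deleted in favor of a plain `ForceNew` on `cluster`. A condensed sketch of the new block, assembled from the diff; the `num_nodes` and `storage_type` entries are assumptions, since the diff folds those lines away:

```go
package google

import (
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/helper/validation"
)

// Condensed sketch of the new "cluster" block only, not the full resource schema.
var bigtableClusterSchema = &schema.Schema{
    Type:     schema.TypeSet,
    Required: true,
    ForceNew: true, // changing clusters recreates the instance
    MaxItems: 2,    // a second cluster is what enables replication
    Elem: &schema.Resource{
        Schema: map[string]*schema.Schema{
            "cluster_id": {Type: schema.TypeString, Required: true},
            "zone":       {Type: schema.TypeString, Required: true},
            // The next two mirror the old top-level fields; their exact
            // settings are assumptions, as the diff truncates them.
            "num_nodes": {Type: schema.TypeInt, Optional: true},
            "storage_type": {
                Type:         schema.TypeString,
                Optional:     true,
                Default:      "SSD",
                ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
            },
        },
    },
}
```

With `MaxItems` raised to 2 and `ForceNew` set, adding or changing a cluster recreates the instance, so the customdiff machinery that previously simulated ForceNew during the deprecation window is no longer needed.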
google-beta/resource_bigtable_instance.go: 205 changes (57 additions, 148 deletions)
@@ -5,7 +5,6 @@ import (
   "fmt"
   "log"
 
-  "github.com/hashicorp/terraform/helper/customdiff"
   "github.com/hashicorp/terraform/helper/schema"
   "github.com/hashicorp/terraform/helper/validation"

@@ -16,13 +15,7 @@ func resourceBigtableInstance() *schema.Resource {
   return &schema.Resource{
     Create: resourceBigtableInstanceCreate,
     Read:   resourceBigtableInstanceRead,
-    // TODO: Update is only needed because we're doing forcenew in customizediff
-    // when we're done with the deprecation, we can drop customizediff and make cluster forcenew
-    Update: schema.Noop,
     Delete: resourceBigtableInstanceDestroy,
-    CustomizeDiff: customdiff.All(
-      resourceBigTableInstanceClusterCustomizeDiff,
-    ),
 
     Schema: map[string]*schema.Schema{
       "name": {
@@ -31,28 +24,20 @@
         ForceNew: true,
       },
 
-      "cluster_id": {
-        Type:          schema.TypeString,
-        Optional:      true,
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "cluster": {
-        Type:          schema.TypeSet,
-        Optional:      true,
-        MaxItems:      1,
-        ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"},
+        Type:     schema.TypeSet,
+        Required: true,
+        ForceNew: true,
+        MaxItems: 2,
         Elem: &schema.Resource{
           Schema: map[string]*schema.Schema{
             "cluster_id": {
               Type:     schema.TypeString,
-              Optional: true,
+              Required: true,
             },
             "zone": {
               Type:     schema.TypeString,
-              Optional: true,
-              Computed: true,
+              Required: true,
             },
             "num_nodes": {
               Type: schema.TypeInt,
@@ -68,29 +53,13 @@
           },
         },
 
-      "zone": {
-        Type:          schema.TypeString,
-        Optional:      true,
-        Computed:      true,
-        ForceNew:      true,
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "display_name": {
         Type:     schema.TypeString,
         Optional: true,
         ForceNew: true,
         Computed: true,
       },
 
-      "num_nodes": {
-        Type:          schema.TypeInt,
-        Optional:      true,
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "instance_type": {
         Type:     schema.TypeString,
         Optional: true,
@@ -99,67 +68,42 @@
         ValidateFunc: validation.StringInSlice([]string{"DEVELOPMENT", "PRODUCTION"}, false),
       },
 
-      "storage_type": {
-        Type:          schema.TypeString,
-        Optional:      true,
-        Default:       "SSD",
-        ValidateFunc:  validation.StringInSlice([]string{"SSD", "HDD"}, false),
-        Deprecated:    "Use cluster instead.",
-        ConflictsWith: []string{"cluster"},
-      },
-
       "project": {
         Type:     schema.TypeString,
         Optional: true,
         Computed: true,
         ForceNew: true,
       },
-    },
-  }
-}
-
-func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
-  if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 {
-    return fmt.Errorf("At least one cluster must be set.")
-  }
-  if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") &&
-    !d.HasChange("storage_type") && !d.HasChange("cluster") {
-    return nil
-  }
-  if d.Get("cluster.#").(int) == 1 {
-    // if we have exactly one cluster, and it has the same values as the old top-level
-    // values, we can assume the user is trying to go from the deprecated values to the
-    // new values, and we shouldn't ForceNew. We know that the top-level values aren't
-    // set, because they ConflictWith cluster.
-    oldID, _ := d.GetChange("cluster_id")
-    oldNodes, _ := d.GetChange("num_nodes")
-    oldZone, _ := d.GetChange("zone")
-    oldStorageType, _ := d.GetChange("storage_type")
-    new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{})
-
-    if oldID.(string) == new["cluster_id"].(string) &&
-      oldNodes.(int) == new["num_nodes"].(int) &&
-      oldZone.(string) == new["zone"].(string) &&
-      oldStorageType.(string) == new["storage_type"].(string) {
-      return nil
-    }
-  }
-  if d.HasChange("cluster_id") {
-    d.ForceNew("cluster_id")
-  }
-  if d.HasChange("cluster") {
-    d.ForceNew("cluster")
-  }
-  if d.HasChange("zone") {
-    d.ForceNew("zone")
-  }
-  if d.HasChange("num_nodes") {
-    d.ForceNew("num_nodes")
-  }
-  if d.HasChange("storage_type") {
-    d.ForceNew("storage_type")
-  }
-  return nil
-}
+
+      "cluster_id": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+
+      "zone": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+
+      "num_nodes": {
+        Type:     schema.TypeInt,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+
+      "storage_type": {
+        Type:     schema.TypeString,
+        Optional: true,
+        Computed: true,
+        Removed:  "Use cluster instead.",
+      },
+    },
+  }
+}

@@ -188,31 +132,9 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
     conf.InstanceType = bigtable.PRODUCTION
   }
 
-  if d.Get("cluster.#").(int) > 0 {
-    // expand cluster
-    conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone)
-    if err != nil {
-      return fmt.Errorf("error expanding clusters: %s", err.Error())
-    }
-  } else {
-    // TODO: remove this when we're done with the deprecation period
-    zone, err := getZone(d, config)
-    if err != nil {
-      return err
-    }
-    cluster := bigtable.ClusterConfig{
-      InstanceID: conf.InstanceID,
-      NumNodes:   int32(d.Get("num_nodes").(int)),
-      Zone:       zone,
-      ClusterID:  d.Get("cluster_id").(string),
-    }
-    switch d.Get("storage_type").(string) {
-    case "HDD":
-      cluster.StorageType = bigtable.HDD
-    case "SSD":
-      cluster.StorageType = bigtable.SSD
-    }
-    conf.Clusters = append(conf.Clusters, cluster)
-  }
+  conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID)
+  if err != nil {
+    return fmt.Errorf("error expanding clusters: %s", err.Error())
+  }
 
   c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
@@ -256,37 +178,27 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error {
   }
 
   d.Set("project", project)
-  if d.Get("cluster.#").(int) > 0 {
-    clusters := d.Get("cluster").(*schema.Set).List()
-    clusterState := []map[string]interface{}{}
-    for _, cl := range clusters {
-      cluster := cl.(map[string]interface{})
-      clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
-      if err != nil {
-        if isGoogleApiErrorWithCode(err, 404) {
-          log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
-          continue
-        }
-        return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
-      }
-      clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
-    }
-    err = d.Set("cluster", clusterState)
-    if err != nil {
-      return fmt.Errorf("Error setting clusters in state: %s", err.Error())
-    }
-    d.Set("cluster_id", "")
-    d.Set("zone", "")
-    d.Set("num_nodes", 0)
-    d.Set("storage_type", "SSD")
-  } else {
-    // TODO remove this when we're done with our deprecation period
-    zone, err := getZone(d, config)
-    if err != nil {
-      return err
-    }
-    d.Set("zone", zone)
-  }
+
+  clusters := d.Get("cluster").(*schema.Set).List()
+  clusterState := []map[string]interface{}{}
+  for _, cl := range clusters {
+    cluster := cl.(map[string]interface{})
+    clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
+    if err != nil {
+      if isGoogleApiErrorWithCode(err, 404) {
+        log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
+        continue
+      }
+      return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
+    }
+    clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
+  }
+
+  err = d.Set("cluster", clusterState)
+  if err != nil {
+    return fmt.Errorf("Error setting clusters in state: %s", err.Error())
+  }
+
   d.Set("name", instance.Name)
   d.Set("display_name", instance.DisplayName)

@@ -329,14 +241,11 @@ func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} {
   }
 }
 
-func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig {
+func expandBigtableClusters(clusters []interface{}, instanceID string) []bigtable.ClusterConfig {
   results := make([]bigtable.ClusterConfig, 0, len(clusters))
   for _, c := range clusters {
     cluster := c.(map[string]interface{})
-    zone := defaultZone
-    if confZone, ok := cluster["zone"]; ok {
-      zone = confZone.(string)
-    }
+    zone := cluster["zone"].(string)
     var storageType bigtable.StorageType
     switch cluster["storage_type"].(string) {
     case "SSD":
Expand Down
google-beta/resource_bigtable_instance_test.go: 49 changes (45 additions, 4 deletions)
@@ -31,6 +31,27 @@ func TestAccBigtableInstance_basic(t *testing.T) {
   })
 }
 
+func TestAccBigtableInstance_cluster(t *testing.T) {
+  t.Parallel()
+
+  instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckBigtableInstanceDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccBigtableInstance_cluster(instanceName),
+        Check: resource.ComposeTestCheckFunc(
+          testAccBigtableInstanceExists(
+            "google_bigtable_instance.instance"),
+        ),
+      },
+    },
+  })
+}
+
 func TestAccBigtableInstance_development(t *testing.T) {
   t.Parallel()
 
@@ -65,12 +86,12 @@ func testAccCheckBigtableInstanceDestroy(s *terraform.State) error {
       return fmt.Errorf("Error starting instance admin client. %s", err)
     }
 
+    defer c.Close()
+
     _, err = c.InstanceInfo(ctx, rs.Primary.Attributes["name"])
     if err == nil {
       return fmt.Errorf("Instance %s still exists.", rs.Primary.Attributes["name"])
     }
-
-    c.Close()
   }
 
   return nil
@@ -93,13 +114,13 @@ func testAccBigtableInstanceExists(n string) resource.TestCheckFunc {
       return fmt.Errorf("Error starting instance admin client. %s", err)
     }
 
+    defer c.Close()
+
     _, err = c.InstanceInfo(ctx, rs.Primary.Attributes["name"])
     if err != nil {
      return fmt.Errorf("Error retrieving instance %s.", rs.Primary.Attributes["name"])
     }
-
-    c.Close()
 
     return nil
   }
 }
@@ -118,6 +139,26 @@ resource "google_bigtable_instance" "instance" {
 `, instanceName, instanceName)
 }
 
+func testAccBigtableInstance_cluster(instanceName string) string {
+  return fmt.Sprintf(`
+resource "google_bigtable_instance" "instance" {
+  name = "%s"
+  cluster {
+    cluster_id   = "%s-a"
+    zone         = "us-central1-b"
+    num_nodes    = 3
+    storage_type = "HDD"
+  }
+  cluster {
+    cluster_id   = "%s-b"
+    zone         = "us-central1-c"
+    num_nodes    = 3
+    storage_type = "HDD"
+  }
+}
+`, instanceName, instanceName, instanceName)
+}
+
 func testAccBigtableInstance_development(instanceName string) string {
   return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
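To make the replication behavior concrete: with two `cluster` blocks, the provider hands the Bigtable admin API one instance config carrying two `bigtable.ClusterConfig` entries. A hypothetical standalone equivalent of the two-cluster fixture above; the `InstanceWithClustersConfig` and `CreateInstanceWithClusters` names reflect my reading of the `cloud.google.com/go/bigtable` client, not code from this PR:

```go
package main

import (
    "context"
    "log"

    "cloud.google.com/go/bigtable"
)

func main() {
    ctx := context.Background()

    // The provider obtains this client via its bigtableClientFactory; a direct
    // client is used here to keep the sketch standalone.
    c, err := bigtable.NewInstanceAdminClient(ctx, "my-project")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    // One config, two clusters in different zones: this single call is what
    // the new schema enables, versus the old single-cluster create path.
    conf := bigtable.InstanceWithClustersConfig{
        InstanceID:   "tf-instance",
        DisplayName:  "tf-instance",
        InstanceType: bigtable.PRODUCTION,
        Clusters: []bigtable.ClusterConfig{
            {InstanceID: "tf-instance", ClusterID: "tf-instance-a", Zone: "us-central1-b", NumNodes: 3, StorageType: bigtable.HDD},
            {InstanceID: "tf-instance", ClusterID: "tf-instance-b", Zone: "us-central1-c", NumNodes: 3, StorageType: bigtable.HDD},
        },
    }
    if err := c.CreateInstanceWithClusters(ctx, &conf); err != nil {
        log.Fatal(err)
    }
}
```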