diff --git a/build/terraform b/build/terraform index 51e63bfff2d2..40ebce2dfc4d 160000 --- a/build/terraform +++ b/build/terraform @@ -1 +1 @@ -Subproject commit 51e63bfff2d2acba78bdbb35227669b820a4d61e +Subproject commit 40ebce2dfc4dcaa72b48758ae4e2fefaf51e4e55 diff --git a/provider/terraform/common~copy.yaml b/provider/terraform/common~copy.yaml index 107958590b59..45ebba91fbb6 100644 --- a/provider/terraform/common~copy.yaml +++ b/provider/terraform/common~copy.yaml @@ -14,13 +14,28 @@ # the final module tree structure: <% dir = _version_name == 'beta' ? 'google-beta' : 'google' - Dir["templates/terraform/tests/*#{api.prefix[1..-1]}*"].each do |file_path| - fname = file_path.split('/')[-1] -%> # Handwritten acceptance tests for autogenerated resources. # Adding them here allows updating the tests as part of a MM pull request. +<% Dir["templates/terraform/tests/*#{api.prefix[1..-1]}*"].each do |file_path| + fname = file_path.split('/')[-1] +-%> '<%= dir -%>/<%= fname -%>': 'templates/terraform/tests/<%= fname -%>' <% end -%> +# Copy all of the terraform resources that are still hand written +<% + Dir["provider/terraform/resources/*#{api.prefix[1..-1]}*"].each do |file_path| + fname = file_path.split('/')[-1] +-%> +'<%= dir -%>/<%= fname -%>': 'provider/terraform/resources/<%= fname -%>' +<% end -%> +<% + Dir["provider/terraform/data_sources/*#{api.prefix[1..-1]}*"].each do |file_path| + fname = file_path.split('/')[-1] +-%> +'<%= dir -%>/<%= fname -%>': 'provider/terraform/data_sources/<%= fname -%>' +<% end -%> + '<%= dir -%>/transport.go': 'templates/terraform/transport.go' '<%= dir -%>/transport_test.go': 'templates/terraform/transport_test.go' '<%= dir -%>/import.go': 'templates/terraform/import.go' diff --git a/provider/terraform/data_sources/data_source_compute_lb_ip_ranges.go b/provider/terraform/data_sources/data_source_compute_lb_ip_ranges.go new file mode 100644 index 000000000000..a5a6309147e7 --- /dev/null +++ 
b/provider/terraform/data_sources/data_source_compute_lb_ip_ranges.go @@ -0,0 +1,45 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeLbIpRanges() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeLbIpRangesRead, + + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + "http_ssl_tcp_internal": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeLbIpRangesRead(d *schema.ResourceData, meta interface{}) error { + d.SetId("compute-lb-ip-ranges") + + // https://cloud.google.com/compute/docs/load-balancing/health-checks#health_check_source_ips_and_firewall_rules + + networkIpRanges := []string{ + "209.85.152.0/22", + "209.85.204.0/22", + "35.191.0.0/16", + } + d.Set("network", networkIpRanges) + + httpSslTcpInternalRanges := []string{ + "130.211.0.0/22", + "35.191.0.0/16", + } + d.Set("http_ssl_tcp_internal", httpSslTcpInternalRanges) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_container_registry_image.go b/provider/terraform/data_sources/data_source_container_registry_image.go new file mode 100644 index 000000000000..093784b5f3fc --- /dev/null +++ b/provider/terraform/data_sources/data_source_container_registry_image.go @@ -0,0 +1,67 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleContainerImage() *schema.Resource { + return &schema.Resource{ + Read: containerRegistryImageRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "tag": { + Type: schema.TypeString, + Optional: true, + }, + "digest": { + Type: schema.TypeString, + Optional: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + }, + "project": { + Type: 
schema.TypeString, + Optional: true, + Computed: true, + }, + "image_url": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func containerRegistryImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + d.Set("project", project) + region, ok := d.GetOk("region") + var url_base string + if ok && region != nil && region != "" { + url_base = fmt.Sprintf("%s.gcr.io/%s", region, project) + } else { + url_base = fmt.Sprintf("gcr.io/%s", project) + } + tag, t_ok := d.GetOk("tag") + digest, d_ok := d.GetOk("digest") + if t_ok && tag != nil && tag != "" { + d.Set("image_url", fmt.Sprintf("%s/%s:%s", url_base, d.Get("name").(string), tag)) + } else if d_ok && digest != nil && digest != "" { + d.Set("image_url", fmt.Sprintf("%s/%s@%s", url_base, d.Get("name").(string), digest)) + } else { + d.Set("image_url", fmt.Sprintf("%s/%s", url_base, d.Get("name").(string))) + } + d.SetId(d.Get("image_url").(string)) + return nil +} diff --git a/provider/terraform/data_sources/data_source_container_registry_repository.go b/provider/terraform/data_sources/data_source_container_registry_repository.go new file mode 100644 index 000000000000..1ddcae3b60b5 --- /dev/null +++ b/provider/terraform/data_sources/data_source_container_registry_repository.go @@ -0,0 +1,45 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleContainerRepo() *schema.Resource { + return &schema.Resource{ + Read: containerRegistryRepoRead, + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "repository_url": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func containerRegistryRepoRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := 
getProject(d, config) + if err != nil { + return err + } + d.Set("project", project) + region, ok := d.GetOk("region") + if ok && region != nil && region != "" { + d.Set("repository_url", fmt.Sprintf("%s.gcr.io/%s", region, project)) + } else { + d.Set("repository_url", fmt.Sprintf("gcr.io/%s", project)) + } + d.SetId(d.Get("repository_url").(string)) + return nil +} diff --git a/provider/terraform/data_sources/data_source_dns_managed_zone.go b/provider/terraform/data_sources/data_source_dns_managed_zone.go new file mode 100644 index 000000000000..05f96458e9e5 --- /dev/null +++ b/provider/terraform/data_sources/data_source_dns_managed_zone.go @@ -0,0 +1,65 @@ +package google + +import "github.com/hashicorp/terraform/helper/schema" + +func dataSourceDnsManagedZone() *schema.Resource { + return &schema.Resource{ + Read: dataSourceDnsManagedZoneRead, + + Schema: map[string]*schema.Schema{ + "dns_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name_servers": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. 
+ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.SetId(d.Get("name").(string)) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := config.clientDns.ManagedZones.Get( + project, d.Id()).Do() + if err != nil { + return err + } + + d.Set("name_servers", zone.NameServers) + d.Set("name", zone.Name) + d.Set("dns_name", zone.DnsName) + d.Set("description", zone.Description) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_active_folder.go b/provider/terraform/data_sources/data_source_google_active_folder.go new file mode 100644 index 000000000000..3d64ea2cf6f9 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_active_folder.go @@ -0,0 +1,55 @@ +package google + +import ( + "fmt" + "net/url" + + "github.com/hashicorp/terraform/helper/schema" + resourceManagerV2Beta1 "google.golang.org/api/cloudresourcemanager/v2beta1" +) + +func dataSourceGoogleActiveFolder() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleActiveFolderRead, + + Schema: map[string]*schema.Schema{ + "parent": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleActiveFolderRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + parent := d.Get("parent").(string) + displayName := d.Get("display_name").(string) + + queryString := fmt.Sprintf("lifecycleState=ACTIVE AND parent=%s AND displayName=%s", parent, url.QueryEscape(displayName)) + searchRequest := &resourceManagerV2Beta1.SearchFoldersRequest{ + Query: queryString, + } + searchResponse, err := 
config.clientResourceManagerV2Beta1.Folders.Search(searchRequest).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Folder Not Found : %s", displayName)) + } + + for _, folder := range searchResponse.Folders { + if folder.DisplayName == displayName { + d.SetId(folder.Name) + d.Set("name", folder.Name) + return nil + } + } + return fmt.Errorf("Folder not found") +} diff --git a/provider/terraform/data_sources/data_source_google_billing_account.go b/provider/terraform/data_sources/data_source_google_billing_account.go new file mode 100644 index 000000000000..a29e6eb69dbe --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_billing_account.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/cloudbilling/v1" +) + +func dataSourceGoogleBillingAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceBillingAccountRead, + Schema: map[string]*schema.Schema{ + "billing_account": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"display_name"}, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"billing_account"}, + }, + "open": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "project_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func dataSourceBillingAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + open, openOk := d.GetOkExists("open") + + var billingAccount *cloudbilling.BillingAccount + if v, ok := d.GetOk("billing_account"); ok { + resp, err := config.clientBilling.BillingAccounts.Get(canonicalBillingAccountName(v.(string))).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Billing Account Not Found : %s", 
v)) + } + + if openOk && resp.Open != open.(bool) { + return fmt.Errorf("Billing account not found: %s", v) + } + + billingAccount = resp + } else if v, ok := d.GetOk("display_name"); ok { + token := "" + for paginate := true; paginate; { + resp, err := config.clientBilling.BillingAccounts.List().PageToken(token).Do() + if err != nil { + return fmt.Errorf("Error reading billing accounts: %s", err) + } + + for _, ba := range resp.BillingAccounts { + if ba.DisplayName == v.(string) { + if openOk && ba.Open != open.(bool) { + continue + } + if billingAccount != nil { + return fmt.Errorf("More than one matching billing account found") + } + billingAccount = ba + } + } + + token = resp.NextPageToken + paginate = token != "" + } + + if billingAccount == nil { + return fmt.Errorf("Billing account not found: %s", v) + } + } else { + return fmt.Errorf("one of billing_account or display_name must be set") + } + + resp, err := config.clientBilling.BillingAccounts.Projects.List(billingAccount.Name).Do() + if err != nil { + return fmt.Errorf("Error reading billing account projects: %s", err) + } + projectIds := flattenBillingProjects(resp.ProjectBillingInfo) + + d.SetId(GetResourceNameFromSelfLink(billingAccount.Name)) + d.Set("name", billingAccount.Name) + d.Set("display_name", billingAccount.DisplayName) + d.Set("open", billingAccount.Open) + d.Set("project_ids", projectIds) + + return nil +} + +func canonicalBillingAccountName(ba string) string { + if strings.HasPrefix(ba, "billingAccounts/") { + return ba + } + + return "billingAccounts/" + ba +} + +func flattenBillingProjects(billingProjects []*cloudbilling.ProjectBillingInfo) []string { + projectIds := make([]string, len(billingProjects)) + for i, billingProject := range billingProjects { + projectIds[i] = billingProject.ProjectId + } + + return projectIds +} diff --git a/provider/terraform/data_sources/data_source_google_client_config.go b/provider/terraform/data_sources/data_source_google_client_config.go new file mode 
100644 index 000000000000..90f01aca24af --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_client_config.go @@ -0,0 +1,46 @@ +package google + +import ( + "time" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleClientConfig() *schema.Resource { + return &schema.Resource{ + Read: dataSourceClientConfigRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Computed: true, + }, + + "access_token": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataSourceClientConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.SetId(time.Now().UTC().String()) + d.Set("project", config.Project) + d.Set("region", config.Region) + + token, err := config.tokenSource.Token() + if err != nil { + return err + } + d.Set("access_token", token.AccessToken) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_cloudfunctions_function.go b/provider/terraform/data_sources/data_source_google_cloudfunctions_function.go new file mode 100644 index 000000000000..f790162df7ad --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_cloudfunctions_function.go @@ -0,0 +1,65 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleCloudFunctionsFunction() *schema.Resource { + // Generate datasource schema from resource + dsSchema := datasourceSchemaFromResourceSchema(resourceCloudFunctionsFunction().Schema) + + // Set 'Required' schema elements + addRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + addOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleCloudFunctionsFunctionRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleCloudFunctionsFunctionRead(d *schema.ResourceData, meta interface{}) error 
{ + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + cloudFuncId := &cloudFunctionId{ + Project: project, + Region: region, + Name: d.Get("name").(string), + } + + d.SetId(cloudFuncId.terraformId()) + + // terrible hack, remove when these fields are removed + // We're temporarily reading these fields only when they are set + // so we need them to be set with bad values entering read + // and then unset if those bad values are still there + d.Set("trigger_topic", "invalid") + d.Set("trigger_bucket", "invalid") + + err = resourceCloudFunctionsRead(d, meta) + if err != nil { + return err + } + + // terrible hack, remove when these fields are removed. see above + if v := d.Get("trigger_topic").(string); v == "invalid" { + d.Set("trigger_topic", "") + } + if v := d.Get("trigger_bucket").(string); v == "invalid" { + d.Set("trigger_bucket", "") + } + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_compute_address.go b/provider/terraform/data_sources/data_source_google_compute_address.go new file mode 100644 index 000000000000..137c4157afff --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_address.go @@ -0,0 +1,143 @@ +package google + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" +) + +var ( + computeAddressIdTemplate = "projects/%s/regions/%s/addresses/%s" + computeAddressLinkRegex = regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$") +) + +func dataSourceGoogleComputeAddress() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeAddressRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "status": &schema.Schema{ + Type: 
schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + address, err := config.clientCompute.Addresses.Get(project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Address Not Found : %s", name)) + } + + d.Set("address", address.Address) + d.Set("status", address.Status) + d.Set("self_link", address.SelfLink) + d.Set("project", project) + d.Set("region", region) + + d.SetId(strconv.FormatUint(uint64(address.Id), 10)) + return nil +} + +type computeAddressId struct { + Project string + Region string + Name string +} + +func (s computeAddressId) canonicalId() string { + return fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) +} + +func parseComputeAddressId(id string, config *Config) (*computeAddressId, error) { + var parts []string + if computeAddressLinkRegex.MatchString(id) { + parts = computeAddressLinkRegex.FindStringSubmatch(id) + + return &computeAddressId{ + Project: parts[1], + Region: parts[2], + Name: parts[3], + }, nil + } else { + parts = strings.Split(id, "/") + } + + if len(parts) == 3 { + return &computeAddressId{ + Project: parts[0], + Region: parts[1], + Name: parts[2], + }, nil + } else if len(parts) == 2 { + // Project is optional. 
+ if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") + } + + return &computeAddressId{ + Project: config.Project, + Region: parts[0], + Name: parts[1], + }, nil + } else if len(parts) == 1 { + // Project and region is optional + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") + } + if config.Region == "" { + return nil, fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") + } + + return &computeAddressId{ + Project: config.Project, + Region: config.Region, + Name: parts[0], + }, nil + } + + return nil, fmt.Errorf("Invalid compute address id. Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") +} diff --git a/provider/terraform/data_sources/data_source_google_compute_backend_service.go b/provider/terraform/data_sources/data_source_google_compute_backend_service.go new file mode 100644 index 000000000000..40b883252b1d --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_backend_service.go @@ -0,0 +1,28 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeBackendService() *schema.Resource { + dsSchema := datasourceSchemaFromResourceSchema(resourceComputeBackendService().Schema) + + // Set 'Required' schema elements + addRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + addOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceComputeBackendServiceRead, + Schema: dsSchema, + } +} + +func dataSourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + serviceName := d.Get("name").(string) + + d.SetId(serviceName) + + return resourceComputeBackendServiceRead(d, meta) +} diff --git 
a/provider/terraform/data_sources/data_source_google_compute_default_service_account.go b/provider/terraform/data_sources/data_source_google_compute_default_service_account.go new file mode 100644 index 000000000000..01093b3e7a7f --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_default_service_account.go @@ -0,0 +1,41 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeDefaultServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeDefaultServiceAccountRead, + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeDefaultServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + projectCompResource, err := config.clientCompute.Projects.Get(project).Do() + if err != nil { + return handleNotFoundError(err, d, "GCE service account not found") + } + + d.SetId(projectCompResource.DefaultServiceAccount) + d.Set("email", projectCompResource.DefaultServiceAccount) + d.Set("project", project) + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_compute_forwarding_rule.go b/provider/terraform/data_sources/data_source_google_compute_forwarding_rule.go new file mode 100644 index 000000000000..9fb8f305be4f --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_forwarding_rule.go @@ -0,0 +1,128 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeForwardingRuleRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "backend_service": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "load_balancing_scheme": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ports": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + frule, err := config.clientCompute.ForwardingRules.Get( + project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule Not Found : %s", name)) + } + d.SetId(frule.Name) + + d.Set("self_link", frule.SelfLink) + d.Set("description", frule.Description) + d.Set("backend_service", frule.BackendService) + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("load_balancing_scheme", 
frule.LoadBalancingScheme) + d.Set("name", frule.Name) + d.Set("port_range", frule.PortRange) + d.Set("ports", frule.Ports) + d.Set("subnetwork", frule.Subnetwork) + d.Set("network", frule.Network) + d.Set("target", frule.Target) + d.Set("project", project) + d.Set("region", region) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_compute_global_address.go b/provider/terraform/data_sources/data_source_google_compute_global_address.go new file mode 100644 index 000000000000..d0ac1728f872 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_global_address.go @@ -0,0 +1,64 @@ +package google + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeGlobalAddressRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + address, err := config.clientCompute.GlobalAddresses.Get(project, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Global Address Not Found : %s", name)) + } + + d.Set("address", address.Address) + d.Set("status", address.Status) + d.Set("self_link", address.SelfLink) + d.Set("project", project) + + d.SetId(strconv.FormatUint(uint64(address.Id), 10)) + return nil +} diff 
--git a/provider/terraform/data_sources/data_source_google_compute_image.go b/provider/terraform/data_sources/data_source_google_compute_image.go new file mode 100644 index 000000000000..006bfb9233a6 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_image.go @@ -0,0 +1,168 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + compute "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeImage() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeImageRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"family"}, + }, + "family": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ConflictsWith: []string{"name"}, + }, + "archive_size_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeString, + Computed: true, + }, + "image_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "licenses": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "source_disk": { + Type: schema.TypeString, + Computed: true, + }, + "source_disk_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + }, + "source_disk_id": { + Type: schema.TypeString, + Computed: true, + }, + "source_image_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: 
schema.TypeString, + Computed: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func dataSourceGoogleComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + params := []string{project} + var image *compute.Image + if v, ok := d.GetOk("name"); ok { + params = append(params, v.(string)) + log.Printf("[DEBUG] Fetching image %s", v.(string)) + image, err = config.clientCompute.Images.Get(project, v.(string)).Do() + log.Printf("[DEBUG] Fetched image %s", v.(string)) + } else if v, ok := d.GetOk("family"); ok { + params = append(params, "family", v.(string)) + log.Printf("[DEBUG] Fetching latest non-deprecated image from family %s", v.(string)) + image, err = config.clientCompute.Images.GetFromFamily(project, v.(string)).Do() + log.Printf("[DEBUG] Fetched latest non-deprecated image from family %s", v.(string)) + } else { + return fmt.Errorf("one of name or family must be set") + } + + if err != nil { + return fmt.Errorf("error retrieving image information: %s", err) + } + + var ieks256, sdeks256 string + + if image.SourceDiskEncryptionKey != nil { + sdeks256 = image.SourceDiskEncryptionKey.Sha256 + } + + if image.ImageEncryptionKey != nil { + ieks256 = image.ImageEncryptionKey.Sha256 + } + + d.Set("project", project) + d.Set("name", image.Name) + d.Set("family", image.Family) + d.Set("archive_size_bytes", image.ArchiveSizeBytes) + d.Set("creation_timestamp", image.CreationTimestamp) + d.Set("description", image.Description) + d.Set("disk_size_gb", image.DiskSizeGb) + d.Set("image_id", strconv.FormatUint(image.Id, 10)) + d.Set("image_encryption_key_sha256", ieks256) + d.Set("label_fingerprint", image.LabelFingerprint) + d.Set("labels", image.Labels) + d.Set("licenses", image.Licenses) + d.Set("self_link", 
image.SelfLink) + d.Set("source_disk", image.SourceDisk) + d.Set("source_disk_encryption_key_sha256", sdeks256) + d.Set("source_disk_id", image.SourceDiskId) + d.Set("source_image_id", image.SourceImageId) + d.Set("status", image.Status) + + d.SetId(strings.Join(params, "/")) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_compute_instance.go b/provider/terraform/data_sources/data_source_google_compute_instance.go new file mode 100644 index 000000000000..b3a545253828 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_instance.go @@ -0,0 +1,150 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeInstance() *schema.Resource { + // Generate datasource schema from resource + dsSchema := datasourceSchemaFromResourceSchema(resourceComputeInstance().Schema) + + // Set 'Required' schema elements + addRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + addOptionalFieldsToSchema(dsSchema, "project", "zone") + + return &schema.Resource{ + Read: dataSourceGoogleComputeInstanceRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, zone, name, err := GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + + instance, err := config.clientComputeBeta.Instances.Get(project, zone, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", name)) + } + + md := flattenMetadataBeta(instance.Metadata) + if err = d.Set("metadata", md); err != nil { + return fmt.Errorf("error setting metadata: %s", err) + } + + d.Set("can_ip_forward", instance.CanIpForward) + d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType)) + + // Set the networks + // Use the first external IP found for the default connection info. 
+ networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) + if err != nil { + return err + } + if err := d.Set("network_interface", networkInterfaces); err != nil { + return err + } + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. + sshIP := externalIP + if sshIP == "" { + sshIP = internalIP + } + + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": sshIP, + }) + + // Set the metadata fingerprint if there is one. + if instance.Metadata != nil { + d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) + } + + // Set the tags fingerprint if there is one. + if instance.Tags != nil { + d.Set("tags_fingerprint", instance.Tags.Fingerprint) + d.Set("tags", convertStringArrToInterface(instance.Tags.Items)) + } + + if err := d.Set("labels", instance.Labels); err != nil { + return err + } + + if instance.LabelFingerprint != "" { + d.Set("label_fingerprint", instance.LabelFingerprint) + } + + attachedDisks := []map[string]interface{}{} + scratchDisks := []map[string]interface{}{} + for _, disk := range instance.Disks { + if disk.Boot { + err = d.Set("boot_disk", flattenBootDisk(d, disk, config)) + if err != nil { + return err + } + } else if disk.Type == "SCRATCH" { + scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) + } else { + di := map[string]interface{}{ + "source": ConvertSelfLinkToV1(disk.Source), + "device_name": disk.DeviceName, + "mode": disk.Mode, + } + if key := disk.DiskEncryptionKey; key != nil { + di["disk_encryption_key_sha256"] = key.Sha256 + } + attachedDisks = append(attachedDisks, di) + } + } + // Remove nils from map in case there were disks in the config that were not present on read; + // i.e. 
a disk was detached out of band + ads := []map[string]interface{}{} + for _, d := range attachedDisks { + if d != nil { + ads = append(ads, d) + } + } + + err = d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)) + if err != nil { + return err + } + + err = d.Set("scheduling", flattenScheduling(instance.Scheduling)) + if err != nil { + return err + } + + err = d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators)) + if err != nil { + return err + } + + err = d.Set("scratch_disk", scratchDisks) + if err != nil { + return err + } + + d.Set("attached_disk", ads) + d.Set("cpu_platform", instance.CpuPlatform) + d.Set("min_cpu_platform", instance.MinCpuPlatform) + d.Set("deletion_protection", instance.DeletionProtection) + d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)) + d.Set("instance_id", fmt.Sprintf("%d", instance.Id)) + d.Set("project", project) + d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)) + d.Set("name", instance.Name) + d.SetId(ConvertSelfLinkToV1(instance.SelfLink)) + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_compute_instance_group.go b/provider/terraform/data_sources/data_source_google_compute_instance_group.go new file mode 100644 index 000000000000..a786b9b2d17b --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_instance_group.go @@ -0,0 +1,105 @@ +package google + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeInstanceGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceComputeInstanceGroupRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"self_link"}, + }, + + "self_link": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name", "zone"}, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + 
ConflictsWith: []string{"self_link"}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "instances": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "named_port": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "port": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + + "size": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + if name, ok := d.GetOk("name"); ok { + zone, err := getZone(d, config) + if err != nil { + return err + } + d.SetId(fmt.Sprintf("%s/%s", zone, name.(string))) + } else if selfLink, ok := d.GetOk("self_link"); ok { + parsed, err := ParseInstanceGroupFieldValue(selfLink.(string), d, config) + if err != nil { + return err + } + d.Set("name", parsed.Name) + d.Set("zone", parsed.Zone) + d.Set("project", parsed.Project) + d.SetId(fmt.Sprintf("%s/%s", parsed.Zone, parsed.Name)) + } else { + return errors.New("Must provide either `self_link` or `zone/name`") + } + + return resourceComputeInstanceGroupRead(d, meta) +} diff --git a/provider/terraform/data_sources/data_source_google_compute_network.go b/provider/terraform/data_sources/data_source_google_compute_network.go new file mode 100644 index 000000000000..17f5b915bb00 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_network.go @@ -0,0 +1,66 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeNetwork() *schema.Resource { + return &schema.Resource{ + Read: 
dataSourceGoogleComputeNetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "subnetworks_self_links": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + network, err := config.clientCompute.Networks.Get(project, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Network Not Found : %s", name)) + } + d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("self_link", network.SelfLink) + d.Set("description", network.Description) + d.Set("subnetworks_self_links", network.Subnetworks) + d.SetId(network.Name) + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_compute_region_instance_group.go b/provider/terraform/data_sources/data_source_google_compute_region_instance_group.go new file mode 100644 index 000000000000..2fc17d045a8e --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_region_instance_group.go @@ -0,0 +1,146 @@ +package google + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeRegionInstanceGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceComputeRegionInstanceGroupRead, + Schema: map[string]*schema.Schema{ + "name": { + 
Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "instances": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + }, + + "status": { + Type: schema.TypeString, + Required: true, + }, + + "named_ports": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "size": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + + instanceGroup, err := config.clientCompute.RegionInstanceGroups.Get( + project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Region Instance Group %q", name)) + } + + members, err := config.clientCompute.RegionInstanceGroups.ListInstances( + project, region, name, &compute.RegionInstanceGroupsListInstancesRequest{ + InstanceState: "ALL", + }).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't have any instances, which is okay. 
+ d.Set("instances", nil) + } else { + return fmt.Errorf("Error reading RegionInstanceGroup Members: %s", err) + } + } else { + d.Set("instances", flattenInstancesWithNamedPorts(members.Items)) + } + d.SetId(strconv.FormatUint(instanceGroup.Id, 16)) + d.Set("self_link", instanceGroup.SelfLink) + d.Set("name", name) + d.Set("project", project) + d.Set("region", region) + return nil +} + +func flattenInstancesWithNamedPorts(insts []*compute.InstanceWithNamedPorts) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(insts)) + log.Printf("There were %d instances.\n", len(insts)) + for _, inst := range insts { + instMap := make(map[string]interface{}) + instMap["instance"] = inst.Instance + instMap["named_ports"] = flattenNamedPorts(inst.NamedPorts) + instMap["status"] = inst.Status + result = append(result, instMap) + } + return result +} + +func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result +} diff --git a/provider/terraform/data_sources/data_source_google_compute_regions.go b/provider/terraform/data_sources/data_source_google_compute_regions.go new file mode 100644 index 000000000000..683bbc4c774d --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_regions.go @@ -0,0 +1,73 @@ +package google + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeRegions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeRegionsRead, + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + 
Optional: true, + Computed: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"UP", "DOWN"}, false), + }, + }, + } +} + +func dataSourceGoogleComputeRegionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + filter := "" + if s, ok := d.GetOk("status"); ok { + filter = fmt.Sprintf(" (status eq %s)", s) + } + + call := config.clientCompute.Regions.List(project).Filter(filter) + + resp, err := call.Do() + if err != nil { + return err + } + + regions := flattenRegions(resp.Items) + log.Printf("[DEBUG] Received Google Compute Regions: %q", regions) + + d.Set("names", regions) + d.Set("project", project) + d.SetId(time.Now().UTC().String()) + + return nil +} + +func flattenRegions(regions []*compute.Region) []string { + result := make([]string, len(regions), len(regions)) + for i, region := range regions { + result[i] = region.Name + } + sort.Strings(result) + return result +} diff --git a/provider/terraform/data_sources/data_source_google_compute_ssl_policy.go b/provider/terraform/data_sources/data_source_google_compute_ssl_policy.go new file mode 100644 index 000000000000..020c852c2c9f --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_ssl_policy.go @@ -0,0 +1,29 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleComputeSslPolicy() *schema.Resource { + // Generate datasource schema from resource + dsSchema := datasourceSchemaFromResourceSchema(resourceComputeSslPolicy().Schema) + + // Set 'Required' schema elements + addRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + addOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: 
datasourceComputeSslPolicyRead, + Schema: dsSchema, + } +} + +func datasourceComputeSslPolicyRead(d *schema.ResourceData, meta interface{}) error { + policyName := d.Get("name").(string) + + d.SetId(policyName) + + return resourceComputeSslPolicyRead(d, meta) +} diff --git a/provider/terraform/data_sources/data_source_google_compute_subnetwork.go b/provider/terraform/data_sources/data_source_google_compute_subnetwork.go new file mode 100644 index 000000000000..b490a27df216 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_subnetwork.go @@ -0,0 +1,130 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeSubnetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "private_ip_google_access": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + "secondary_ip_range": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "range_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "network": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "gateway_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "project": &schema.Schema{ + Type: 
schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + subnetwork, err := config.clientCompute.Subnetworks.Get(project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Subnetwork Not Found : %s", name)) + } + + d.Set("ip_cidr_range", subnetwork.IpCidrRange) + d.Set("private_ip_google_access", subnetwork.PrivateIpGoogleAccess) + d.Set("self_link", subnetwork.SelfLink) + d.Set("description", subnetwork.Description) + d.Set("gateway_address", subnetwork.GatewayAddress) + d.Set("network", subnetwork.Network) + d.Set("project", project) + d.Set("region", region) + // Flattening code defined in resource_compute_subnetwork.go + d.Set("secondary_ip_range", flattenSecondaryRanges(subnetwork.SecondaryIpRanges)) + + //Subnet id creation is defined in resource_compute_subnetwork.go + subnetwork.Region = region + d.SetId(createSubnetID(subnetwork)) + return nil +} + +func flattenSecondaryRanges(secondaryRanges []*compute.SubnetworkSecondaryRange) []map[string]interface{} { + secondaryRangesSchema := make([]map[string]interface{}, 0, len(secondaryRanges)) + for _, secondaryRange := range secondaryRanges { + data := map[string]interface{}{ + "range_name": secondaryRange.RangeName, + "ip_cidr_range": secondaryRange.IpCidrRange, + } + + secondaryRangesSchema = append(secondaryRangesSchema, data) + } + return secondaryRangesSchema +} + +func createSubnetID(s *compute.Subnetwork) string { + return fmt.Sprintf("%s/%s", s.Region, s.Name) +} + +func createSubnetIDBeta(s *computeBeta.Subnetwork) string { + return fmt.Sprintf("%s/%s", s.Region, s.Name) +} diff --git 
a/provider/terraform/data_sources/data_source_google_compute_vpn_gateway.go b/provider/terraform/data_sources/data_source_google_compute_vpn_gateway.go new file mode 100644 index 000000000000..e7ddf9217cb4 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_vpn_gateway.go @@ -0,0 +1,78 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeVpnGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeVpnGatewayRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) + + gateway, err := vpnGatewaysService.Get(project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("VPN Gateway Not Found : %s", name)) + } + d.Set("network", gateway.Network) + d.Set("region", gateway.Region) + d.Set("self_link", gateway.SelfLink) + d.Set("description", gateway.Description) + d.Set("project", project) + d.SetId(gateway.Name) + return nil +} diff --git 
a/provider/terraform/data_sources/data_source_google_compute_zones.go b/provider/terraform/data_sources/data_source_google_compute_zones.go new file mode 100644 index 000000000000..a6a48273fd31 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_compute_zones.go @@ -0,0 +1,87 @@ +package google + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeZones() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeZonesRead, + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"UP", "DOWN"}, false), + }, + }, + } +} + +func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region := config.Region + if r, ok := d.GetOk("region"); ok { + region = r.(string) + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + regionUrl := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s", + project, region) + filter := fmt.Sprintf("(region eq %s)", regionUrl) + + if s, ok := d.GetOk("status"); ok { + filter += fmt.Sprintf(" (status eq %s)", s) + } + + call := config.clientCompute.Zones.List(project).Filter(filter) + + resp, err := call.Do() + if err != nil { + return err + } + + zones := flattenZones(resp.Items) + log.Printf("[DEBUG] Received Google Compute Zones: %q", zones) + + d.Set("names", zones) + d.Set("region", region) + d.Set("project", project) + d.SetId(time.Now().UTC().String()) + + return nil +} 
+ +func flattenZones(zones []*compute.Zone) []string { + result := make([]string, len(zones), len(zones)) + for i, zone := range zones { + result[i] = zone.Name + } + sort.Strings(result) + return result +} diff --git a/provider/terraform/data_sources/data_source_google_container_cluster.go b/provider/terraform/data_sources/data_source_google_container_cluster.go new file mode 100644 index 000000000000..91d8159c7fa4 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_container_cluster.go @@ -0,0 +1,29 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleContainerCluster() *schema.Resource { + // Generate datasource schema from resource + dsSchema := datasourceSchemaFromResourceSchema(resourceContainerCluster().Schema) + + // Set 'Required' schema elements + addRequiredFieldsToSchema(dsSchema, "name") + + // Set 'Optional' schema elements + addOptionalFieldsToSchema(dsSchema, "project", "zone", "region") + + return &schema.Resource{ + Read: datasourceContainerClusterRead, + Schema: dsSchema, + } +} + +func datasourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + clusterName := d.Get("name").(string) + + d.SetId(clusterName) + + return resourceContainerClusterRead(d, meta) +} diff --git a/provider/terraform/data_sources/data_source_google_container_engine_versions.go b/provider/terraform/data_sources/data_source_google_container_engine_versions.go new file mode 100644 index 000000000000..5e6e420195fd --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_container_engine_versions.go @@ -0,0 +1,88 @@ +package google + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleContainerEngineVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerEngineVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "zone": { 
+ Type: schema.TypeString, + Optional: true, + }, + "region": { + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"zone"}, + }, + "default_cluster_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_master_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_node_version": { + Type: schema.TypeString, + Computed: true, + }, + "valid_master_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "valid_node_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + location, err := getLocation(d, config) + if err != nil { + return err + } + if len(location) == 0 { + return fmt.Errorf("Cannot determine location: set zone or region in this data source or at provider-level") + } + + location = fmt.Sprintf("projects/%s/locations/%s", project, location) + resp, err := config.clientContainerBeta.Projects.Locations.GetServerConfig(location).Do() + if err != nil { + return fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error()) + } + + d.Set("valid_master_versions", resp.ValidMasterVersions) + d.Set("default_cluster_version", resp.DefaultClusterVersion) + d.Set("valid_node_versions", resp.ValidNodeVersions) + if len(resp.ValidMasterVersions) > 0 { + d.Set("latest_master_version", resp.ValidMasterVersions[0]) + } + if len(resp.ValidNodeVersions) > 0 { + d.Set("latest_node_version", resp.ValidNodeVersions[0]) + } + + d.SetId(time.Now().UTC().String()) + 
return nil +} diff --git a/provider/terraform/data_sources/data_source_google_folder.go b/provider/terraform/data_sources/data_source_google_folder.go new file mode 100644 index 000000000000..df634f99dc91 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_folder.go @@ -0,0 +1,108 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + resourceManagerV2Beta1 "google.golang.org/api/cloudresourcemanager/v2beta1" +) + +func dataSourceGoogleFolder() *schema.Resource { + return &schema.Resource{ + Read: dataSourceFolderRead, + Schema: map[string]*schema.Schema{ + "folder": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + "parent": { + Type: schema.TypeString, + Computed: true, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "lifecycle_state": { + Type: schema.TypeString, + Computed: true, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + }, + "lookup_organization": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "organization": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceFolderRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + folderName := d.Get("folder").(string) + + folder, err := config.clientResourceManagerV2Beta1.Folders.Get(canonicalFolderName(folderName)).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Folder Not Found : %s", folderName)) + } + + d.SetId(GetResourceNameFromSelfLink(folder.Name)) + d.Set("name", folder.Name) + d.Set("parent", folder.Parent) + d.Set("display_name", folder.DisplayName) + d.Set("lifecycle_state", folder.LifecycleState) + d.Set("create_time", folder.CreateTime) + + if v, ok := d.GetOk("lookup_organization"); ok && v.(bool) { + organization, err := lookupOrganizationName(folder, config) + + if err != nil { + return err + } + + 
d.Set("organization", organization) + } + + return nil +} + +func canonicalFolderName(ba string) string { + if strings.HasPrefix(ba, "folders/") { + return ba + } + + return "folders/" + ba +} + +func lookupOrganizationName(folder *resourceManagerV2Beta1.Folder, config *Config) (string, error) { + parent := folder.Parent + + if parent == "" || strings.HasPrefix(parent, "organizations/") { + return parent, nil + } else if strings.HasPrefix(parent, "folders/") { + parentFolder, err := config.clientResourceManagerV2Beta1.Folders.Get(parent).Do() + + if err != nil { + return "", fmt.Errorf("Error getting parent folder '%s': %s", parent, err) + } + + return lookupOrganizationName(parentFolder, config) + } else { + return "", fmt.Errorf("Unknown parent type '%s' on folder '%s'", parent, folder.Name) + } +} diff --git a/provider/terraform/data_sources/data_source_google_iam_policy.go b/provider/terraform/data_sources/data_source_google_iam_policy.go new file mode 100644 index 000000000000..d917c42f1b62 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_iam_policy.go @@ -0,0 +1,90 @@ +package google + +import ( + "encoding/json" + "strconv" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var iamBinding *schema.Schema = &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + }, + "members": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, +} + +// dataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer +// to express a Google Cloud IAM policy in a data resource. 
This is an example +// of how the schema would be used in a config: +// +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } +func dataSourceGoogleIamPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleIamPolicyRead, + Schema: map[string]*schema.Schema{ + "binding": iamBinding, + "policy_data": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// dataSourceGoogleIamPolicyRead reads a data source from config and writes it +// to state. +func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + var policy cloudresourcemanager.Policy + var bindings []*cloudresourcemanager.Binding + + // The schema supports multiple binding{} blocks + bset := d.Get("binding").(*schema.Set) + + // All binding{} blocks will be converted and stored in an array + bindings = make([]*cloudresourcemanager.Binding, bset.Len()) + policy.Bindings = bindings + + // Convert each config binding into a cloudresourcemanager.Binding + for i, v := range bset.List() { + binding := v.(map[string]interface{}) + policy.Bindings[i] = &cloudresourcemanager.Binding{ + Role: binding["role"].(string), + Members: convertStringSet(binding["members"].(*schema.Set)), + } + } + + // Marshal cloudresourcemanager.Policy to JSON suitable for storing in state + pjson, err := json.Marshal(&policy) + if err != nil { + // should never happen if the above code is correct + return err + } + pstring := string(pjson) + + d.Set("policy_data", pstring) + d.SetId(strconv.Itoa(hashcode.String(pstring))) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_kms_secret.go b/provider/terraform/data_sources/data_source_google_kms_secret.go new file mode 100644 index 000000000000..2dbdfcde1bd1 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_kms_secret.go @@ -0,0 +1,67 @@ +package google + +import ( + 
"google.golang.org/api/cloudkms/v1" + + "encoding/base64" + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "log" + "time" +) + +func dataSourceGoogleKmsSecret() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleKmsSecretRead, + Schema: map[string]*schema.Schema{ + "crypto_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "ciphertext": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "plaintext": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataSourceGoogleKmsSecretRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) + + if err != nil { + return err + } + + ciphertext := d.Get("ciphertext").(string) + + kmsDecryptRequest := &cloudkms.DecryptRequest{ + Ciphertext: ciphertext, + } + + decryptResponse, err := config.clientKms.Projects.Locations.KeyRings.CryptoKeys.Decrypt(cryptoKeyId.cryptoKeyId(), kmsDecryptRequest).Do() + + if err != nil { + return fmt.Errorf("Error decrypting ciphertext: %s", err) + } + + plaintext, err := base64.StdEncoding.DecodeString(decryptResponse.Plaintext) + + if err != nil { + return fmt.Errorf("Error decoding base64 response: %s", err) + } + + log.Printf("[INFO] Successfully decrypted ciphertext: %s", ciphertext) + + d.Set("plaintext", string(plaintext[:])) + d.SetId(time.Now().UTC().String()) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_netblock_ip_ranges.go b/provider/terraform/data_sources/data_source_google_netblock_ip_ranges.go new file mode 100644 index 000000000000..15a286e1afc6 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_netblock_ip_ranges.go @@ -0,0 +1,128 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "io/ioutil" + "net/http" + "strings" +) + +func dataSourceGoogleNetblockIpRanges() 
// netblock_request resolves the TXT record for name via Google's
// DNS-over-HTTPS endpoint and returns the raw JSON response body.
// Fixed: the DNS_URL constant was declared but the literal was duplicated in
// the Sprintf call; the constant is now actually used.
func netblock_request(name string) (string, error) {
	const DNS_URL = "https://dns.google.com/resolve?name=%s&type=TXT"

	response, err := http.Get(fmt.Sprintf(DNS_URL, name))
	if err != nil {
		return "", fmt.Errorf("Error from _cloud-netblocks: %s", err)
	}
	defer response.Body.Close()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return "", fmt.Errorf("Error to retrieve the domains list: %s", err)
	}

	return string(body), nil
}

// getCidrBlocks walks the SPF-style _cloud-netblocks TXT records, following
// include: references breadth-first, and collects the advertised ip4:/ip6:
// CIDR ranges into the keys "cidr_blocks", "cidr_blocks_ipv4", and
// "cidr_blocks_ipv6".
//
// Fixed: the combined "cidr_blocks" list previously used
// strings.Split(sp, ":")[1], which truncated IPv6 ranges at their first
// colon (e.g. "2600:1900::/35" became "2600"). Both families are now
// extracted with a prefix trim.
func getCidrBlocks() (map[string][]string, error) {
	const INITIAL_NETBLOCK_DNS = "_cloud-netblocks.googleusercontent.com"

	cidrBlocks := make(map[string][]string)

	response, err := netblock_request(INITIAL_NETBLOCK_DNS)
	if err != nil {
		return nil, err
	}

	// Seed the work queue with the include: targets of the root record.
	var dnsNetblockList []string
	for _, sp := range strings.Split(response, " ") {
		if strings.HasPrefix(sp, "include:") {
			dnsNetblockList = append(dnsNetblockList, strings.Replace(sp, "include:", "", 1))
		}
	}

	// Process the queue FIFO; includes discovered along the way are appended.
	for len(dnsNetblockList) > 0 {
		dnsNetblock := dnsNetblockList[0]
		dnsNetblockList = dnsNetblockList[1:]

		response, err = netblock_request(dnsNetblock)
		if err != nil {
			return nil, err
		}

		for _, sp := range strings.Split(response, " ") {
			switch {
			case strings.HasPrefix(sp, "ip4:"):
				cdrBlock := strings.Replace(sp, "ip4:", "", 1)
				cidrBlocks["cidr_blocks"] = append(cidrBlocks["cidr_blocks"], cdrBlock)
				cidrBlocks["cidr_blocks_ipv4"] = append(cidrBlocks["cidr_blocks_ipv4"], cdrBlock)
			case strings.HasPrefix(sp, "ip6:"):
				cdrBlock := strings.Replace(sp, "ip6:", "", 1)
				cidrBlocks["cidr_blocks"] = append(cidrBlocks["cidr_blocks"], cdrBlock)
				cidrBlocks["cidr_blocks_ipv6"] = append(cidrBlocks["cidr_blocks_ipv6"], cdrBlock)
			case strings.HasPrefix(sp, "include:"):
				dnsNetblockList = append(dnsNetblockList, strings.Replace(sp, "include:", "", 1))
			}
		}
	}

	return cidrBlocks, nil
}
// canonicalOrganizationName ensures the given organization identifier is in
// the canonical "organizations/{id}" form expected by the Resource Manager
// API; an already-canonical value is returned unchanged.
func canonicalOrganizationName(ba string) string {
	const prefix = "organizations/"
	if strings.HasPrefix(ba, prefix) {
		return ba
	}
	return prefix + ba
}
b/provider/terraform/data_sources/data_source_google_project.go new file mode 100644 index 000000000000..84f1a6149551 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_project.go @@ -0,0 +1,34 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleProject() *schema.Resource { + // Generate datasource schema from resource + dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleProject().Schema) + + addOptionalFieldsToSchema(dsSchema, "project_id") + + return &schema.Resource{ + Read: datasourceGoogleProjectRead, + Schema: dsSchema, + } +} + +func datasourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + if v, ok := d.GetOk("project_id"); ok { + project := v.(string) + d.SetId(project) + } else { + project, err := getProject(d, config) + if err != nil { + return err + } + d.SetId(project) + } + + return resourceGoogleProjectRead(d, meta) +} diff --git a/provider/terraform/data_sources/data_source_google_project_services.go b/provider/terraform/data_sources/data_source_google_project_services.go new file mode 100644 index 000000000000..62e4ec11abcf --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_project_services.go @@ -0,0 +1,30 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleProjectServices() *schema.Resource { + // Generate datasource schema from resource + dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleProjectServices().Schema) + + // Set 'Optional' schema elements + addOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleProjectServicesRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + d.SetId(project) + + return 
resourceGoogleProjectServicesRead(d, meta) +} diff --git a/provider/terraform/data_sources/data_source_google_service_account.go b/provider/terraform/data_sources/data_source_google_service_account.go new file mode 100644 index 000000000000..e2a1f3cd66db --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_service_account.go @@ -0,0 +1,65 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleServiceAccountRead, + Schema: map[string]*schema.Schema{ + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRFC1035Name(6, 30), + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceAccountName, err := serviceAccountFQN(d.Get("account_id").(string), d, config) + if err != nil { + return err + } + + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(serviceAccountName).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", serviceAccountName)) + } + + d.SetId(sa.Name) + d.Set("email", sa.Email) + d.Set("unique_id", sa.UniqueId) + d.Set("project", sa.ProjectId) + d.Set("account_id", strings.Split(sa.Email, "@")[0]) + d.Set("name", sa.Name) + d.Set("display_name", sa.DisplayName) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_google_service_account_key.go 
// dataSourceGoogleServiceAccountKey returns the schema for the
// google_service_account_key data source, which looks up one service account
// key and exposes its public key material.
func dataSourceGoogleServiceAccountKey() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceGoogleServiceAccountKeyRead,

		Schema: map[string]*schema.Schema{
			// Full key name:
			// projects/{project}/serviceAccounts/{account}/keys/{key}.
			"name": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validateRegexp(ServiceAccountKeyNameRegex),
			},
			// Output format for the public key; defaults to an X.509 PEM file.
			"public_key_type": {
				Type:         schema.TypeString,
				Default:      "TYPE_X509_PEM_FILE",
				Optional:     true,
				ValidateFunc: validation.StringInSlice([]string{"TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY"}, false),
			},
			"project": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"key_algorithm": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"public_key": {
				Type:     schema.TypeString,
				Computed: true,
			},
			// Deprecated alias for "name"; kept for backward compatibility.
			"service_account_id": {
				Type:          schema.TypeString,
				Optional:      true,
				ConflictsWith: []string{"name"},
				Deprecated:    "Please use name to specify full service account key path projects/{project}/serviceAccounts/{serviceAccount}/keys/{keyId}",
			},
		},
	}
}
d.Set("key_algorithm", sak.KeyAlgorithm) + d.Set("public_key", sak.PublicKeyData) + + return nil +} + +func getDataSourceServiceAccountKeyName(d *schema.ResourceData) (string, error) { + keyName := d.Get("name").(string) + keyFromSAId := d.Get("service_account_id").(string) + + // Neither name nor service_account_id specified + if keyName == "" && keyFromSAId == "" { + return "", fmt.Errorf("please use name to specify service account key being added as this data source") + } + + fullKeyName := keyName + if fullKeyName == "" { + // Key name specified as incorrectly named, deprecated service account ID field + fullKeyName = keyFromSAId + } + + // Validate name since interpolated values (i.e from a key or service + // account resource) will not get validated at plan time. + r := regexp.MustCompile(ServiceAccountKeyNameRegex) + if r.MatchString(fullKeyName) { + return fullKeyName, nil + } + + return "", fmt.Errorf("invalid key name %q does not match regexp %q", fullKeyName, ServiceAccountKeyNameRegex) +} diff --git a/provider/terraform/data_sources/data_source_google_storage_project_service_account.go b/provider/terraform/data_sources/data_source_google_storage_project_service_account.go new file mode 100644 index 000000000000..04fa4a43b3a6 --- /dev/null +++ b/provider/terraform/data_sources/data_source_google_storage_project_service_account.go @@ -0,0 +1,55 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleStorageProjectServiceAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleStorageProjectServiceAccountRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "user_project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "email_address": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleStorageProjectServiceAccountRead(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + serviceAccountGetRequest := config.clientStorage.Projects.ServiceAccount.Get(project) + + if v, ok := d.GetOk("user_project"); ok { + serviceAccountGetRequest = serviceAccountGetRequest.UserProject(v.(string)) + } + + serviceAccount, err := serviceAccountGetRequest.Do() + if err != nil { + return handleNotFoundError(err, d, "GCS service account not found") + } + + d.Set("project", project) + d.Set("email_address", serviceAccount.EmailAddress) + + d.SetId(serviceAccount.EmailAddress) + + return nil +} diff --git a/provider/terraform/data_sources/data_source_storage_object_signed_url.go b/provider/terraform/data_sources/data_source_storage_object_signed_url.go new file mode 100644 index 000000000000..0b2b78d92b6a --- /dev/null +++ b/provider/terraform/data_sources/data_source_storage_object_signed_url.go @@ -0,0 +1,360 @@ +package google + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "log" + "net/url" + "os" + "strconv" + "strings" + "time" + + "sort" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/pathorcontents" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" +) + +const gcsBaseUrl = "https://storage.googleapis.com" +const googleCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" + +func dataSourceGoogleSignedUrl() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleSignedUrlRead, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "content_md5": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "content_type": &schema.Schema{ + Type: 
// validateExtensionHeaders checks that every key of the extension_headers map
// is a Cloud Storage extension header, i.e. begins with "x-goog-"
// (case-insensitively). It returns one error per offending key.
func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []error) {
	hdrMap := v.(map[string]interface{})
	// `for k := range` — the blank value variable in the original was
	// redundant (gofmt -s / golint flag `k, _`).
	for k := range hdrMap {
		if !strings.HasPrefix(strings.ToLower(k), "x-goog-") {
			errors = append(errors, fmt.Errorf(
				"extension_header (%s) not valid, header name must begin with 'x-goog-'", k))
		}
	}
	return
}
d.GetOk("content_type"); ok { + urlData.ContentType = v.(string) + } + + // extension_headers (x-goog-* HTTP headers) are optional + if v, ok := d.GetOk("extension_headers"); ok { + hdrMap := v.(map[string]interface{}) + + if len(hdrMap) > 0 { + urlData.HttpHeaders = make(map[string]string, len(hdrMap)) + for k, v := range hdrMap { + urlData.HttpHeaders[k] = v.(string) + } + } + } + + urlData.Path = fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string)) + + // Load JWT Config from Google Credentials + jwtConfig, err := loadJwtConfig(d, config) + if err != nil { + return err + } + urlData.JwtConfig = jwtConfig + + // Construct URL + signedUrl, err := urlData.SignedUrl() + if err != nil { + return err + } + + // Success + d.Set("signed_url", signedUrl) + + encodedSig, err := urlData.EncodedSignature() + if err != nil { + return err + } + d.SetId(encodedSig) + + return nil +} + +// loadJwtConfig looks for credentials json in the following places, +// in order of preference: +// 1. `credentials` attribute of the datasource +// 2. `credentials` attribute in the provider definition. +// 3. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. 
+func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { + config := meta.(*Config) + + credentials := "" + if v, ok := d.GetOk("credentials"); ok { + log.Println("[DEBUG] using data source credentials to sign URL") + credentials = v.(string) + + } else if config.Credentials != "" { + log.Println("[DEBUG] using provider credentials to sign URL") + credentials = config.Credentials + + } else if filename := os.Getenv(googleCredentialsEnvVar); filename != "" { + log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials to sign URL") + credentials = filename + + } + + if strings.TrimSpace(credentials) != "" { + contents, _, err := pathorcontents.Read(credentials) + if err != nil { + return nil, errwrap.Wrapf("Error loading credentials: {{err}}", err) + } + + cfg, err := google.JWTConfigFromJSON([]byte(contents), "") + if err != nil { + return nil, errwrap.Wrapf("Error parsing credentials: {{err}}", err) + } + return cfg, nil + } + + return nil, errors.New("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") +} + +// parsePrivateKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. 
+// copied from golang.org/x/oauth2/internal +func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, errwrap.Wrapf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: {{err}}", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +// UrlData stores the values required to create a Signed Url +type UrlData struct { + JwtConfig *jwt.Config + ContentMd5 string + ContentType string + HttpMethod string + Expires int + HttpHeaders map[string]string + Path string +} + +// SigningString creates a string representation of the UrlData in a form ready for signing: +// see https://cloud.google.com/storage/docs/access-control/create-signed-urls-program +// Example output: +// ------------------- +// GET +// +// +// 1388534400 +// bucket/objectname +// ------------------- +func (u *UrlData) SigningString() []byte { + var buf bytes.Buffer + + // HTTP Verb + buf.WriteString(u.HttpMethod) + buf.WriteString("\n") + + // Content MD5 (optional, always add new line) + buf.WriteString(u.ContentMd5) + buf.WriteString("\n") + + // Content Type (optional, always add new line) + buf.WriteString(u.ContentType) + buf.WriteString("\n") + + // Expiration + buf.WriteString(strconv.Itoa(u.Expires)) + buf.WriteString("\n") + + // Extra HTTP headers (optional) + // Must be sorted in lexigraphical order + var keys []string + for k := range u.HttpHeaders { + keys = append(keys, strings.ToLower(k)) + } + sort.Strings(keys) + // Write sorted headers to signing string buffer + for _, k := range keys { + buf.WriteString(fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k])) + } + + // Storate Object path (includes bucketname) + buf.WriteString(u.Path) + + return buf.Bytes() +} + 
+func (u *UrlData) Signature() ([]byte, error) { + // Sign url data + signature, err := SignString(u.SigningString(), u.JwtConfig) + if err != nil { + return nil, err + + } + + return signature, nil +} + +// EncodedSignature returns the Signature() after base64 encoding and url escaping +func (u *UrlData) EncodedSignature() (string, error) { + signature, err := u.Signature() + if err != nil { + return "", err + } + + // base64 encode signature + encoded := base64.StdEncoding.EncodeToString(signature) + // encoded signature may include /, = characters that need escaping + encoded = url.QueryEscape(encoded) + + return encoded, nil +} + +// SignedUrl constructs the final signed URL a client can use to retrieve storage object +func (u *UrlData) SignedUrl() (string, error) { + + encodedSig, err := u.EncodedSignature() + if err != nil { + return "", err + } + + // build url + // https://cloud.google.com/storage/docs/access-control/create-signed-urls-program + var urlBuffer bytes.Buffer + urlBuffer.WriteString(gcsBaseUrl) + urlBuffer.WriteString(u.Path) + urlBuffer.WriteString("?GoogleAccessId=") + urlBuffer.WriteString(u.JwtConfig.Email) + urlBuffer.WriteString("&Expires=") + urlBuffer.WriteString(strconv.Itoa(u.Expires)) + urlBuffer.WriteString("&Signature=") + urlBuffer.WriteString(encodedSig) + + return urlBuffer.String(), nil +} + +// SignString calculates the SHA256 signature of the input string +func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { + // Parse private key + pk, err := parsePrivateKey(cfg.PrivateKey) + if err != nil { + return nil, errwrap.Wrapf("failed to sign string, could not parse key: {{err}}", err) + } + + // Hash string + hasher := sha256.New() + hasher.Write(toSign) + + // Sign string + signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil)) + if err != nil { + return nil, errwrap.Wrapf("failed to sign string, an error occurred: {{err}}", err) + } + + return signed, nil +} diff --git 
a/provider/terraform/resources/resource_app_engine_application.go b/provider/terraform/resources/resource_app_engine_application.go new file mode 100644 index 000000000000..68cc38102c6d --- /dev/null +++ b/provider/terraform/resources/resource_app_engine_application.go @@ -0,0 +1,285 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/customdiff" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + appengine "google.golang.org/api/appengine/v1" +) + +func resourceAppEngineApplication() *schema.Resource { + return &schema.Resource{ + Create: resourceAppEngineApplicationCreate, + Read: resourceAppEngineApplicationRead, + Update: resourceAppEngineApplicationUpdate, + Delete: resourceAppEngineApplicationDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + CustomizeDiff: customdiff.All( + appEngineApplicationLocationIDCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateProjectID(), + }, + "auth_domain": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "location_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "northamerica-northeast1", + "us-central", + "us-west2", + "us-east1", + "us-east4", + "southamerica-east1", + "europe-west", + "europe-west2", + "europe-west3", + "asia-northeast1", + "asia-south1", + "australia-southeast1", + }, false), + }, + "serving_status": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "UNSPECIFIED", + "SERVING", + "USER_DISABLED", + "SYSTEM_DISABLED", + }, false), + Computed: true, + }, + "feature_settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + 
Elem: appEngineApplicationFeatureSettingsResource(), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "url_dispatch_rule": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: appEngineApplicationURLDispatchRuleResource(), + }, + "code_bucket": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "default_hostname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "default_bucket": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "gcr_domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func appEngineApplicationURLDispatchRuleResource() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "service": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func appEngineApplicationFeatureSettingsResource() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "split_health_checks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + } +} + +func appEngineApplicationLocationIDCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange("location_id") + if old != "" && old != new { + return fmt.Errorf("Cannot change location_id once the resource is created.") + } + return nil +} + +func resourceAppEngineApplicationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + app, err := expandAppEngineApplication(d, project) + if err != nil { + return err + } + log.Printf("[DEBUG] Creating App Engine App") + op, err := config.clientAppEngine.Apps.Create(app).Do() + if err != nil { + return fmt.Errorf("Error creating App Engine application: %s", 
// resourceAppEngineApplicationRead fetches the App Engine application for the
// project stored in the resource ID and mirrors it into state.
func resourceAppEngineApplicationRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	// The resource ID is the project ID — there is one application per project.
	pid := d.Id()

	app, err := config.clientAppEngine.Apps.Get(pid).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("App Engine Application %q", pid))
	}
	d.Set("auth_domain", app.AuthDomain)
	d.Set("code_bucket", app.CodeBucket)
	d.Set("default_bucket", app.DefaultBucket)
	d.Set("default_hostname", app.DefaultHostname)
	d.Set("location_id", app.LocationId)
	d.Set("name", app.Name)
	d.Set("serving_status", app.ServingStatus)
	d.Set("project", pid)
	// url_dispatch_rule and feature_settings are nested blocks; flatten them
	// and surface any d.Set error explicitly rather than ignoring it.
	dispatchRules, err := flattenAppEngineApplicationDispatchRules(app.DispatchRules)
	if err != nil {
		return err
	}
	err = d.Set("url_dispatch_rule", dispatchRules)
	if err != nil {
		return fmt.Errorf("Error setting dispatch rules in state. This is a bug, please report it at https://github.com/terraform-providers/terraform-provider-google/issues. Error is:\n%s", err.Error())
	}
	featureSettings, err := flattenAppEngineApplicationFeatureSettings(app.FeatureSettings)
	if err != nil {
		return err
	}
	err = d.Set("feature_settings", featureSettings)
	if err != nil {
		return fmt.Errorf("Error setting feature settings in state. This is a bug, please report it at https://github.com/terraform-providers/terraform-provider-google/issues. Error is:\n%s", err.Error())
	}
	return nil
}
The project must be deleted to delete the application.") + return nil +} + +func expandAppEngineApplication(d *schema.ResourceData, project string) (*appengine.Application, error) { + result := &appengine.Application{ + AuthDomain: d.Get("auth_domain").(string), + LocationId: d.Get("location_id").(string), + Id: project, + GcrDomain: d.Get("gcr_domain").(string), + ServingStatus: d.Get("serving_status").(string), + } + featureSettings, err := expandAppEngineApplicationFeatureSettings(d) + if err != nil { + return nil, err + } + result.FeatureSettings = featureSettings + return result, nil +} + +func expandAppEngineApplicationFeatureSettings(d *schema.ResourceData) (*appengine.FeatureSettings, error) { + blocks := d.Get("feature_settings").([]interface{}) + if len(blocks) < 1 { + return nil, nil + } + return &appengine.FeatureSettings{ + SplitHealthChecks: d.Get("feature_settings.0.split_health_checks").(bool), + // force send SplitHealthChecks, so if it's set to false it still gets disabled + ForceSendFields: []string{"SplitHealthChecks"}, + }, nil +} + +func flattenAppEngineApplicationFeatureSettings(settings *appengine.FeatureSettings) ([]map[string]interface{}, error) { + if settings == nil { + return []map[string]interface{}{}, nil + } + result := map[string]interface{}{ + "split_health_checks": settings.SplitHealthChecks, + } + return []map[string]interface{}{result}, nil +} + +func flattenAppEngineApplicationDispatchRules(rules []*appengine.UrlDispatchRule) ([]map[string]interface{}, error) { + results := make([]map[string]interface{}, 0, len(rules)) + for _, rule := range rules { + results = append(results, map[string]interface{}{ + "domain": rule.Domain, + "path": rule.Path, + "service": rule.Service, + }) + } + return results, nil +} diff --git a/provider/terraform/resources/resource_bigquery_dataset.go b/provider/terraform/resources/resource_bigquery_dataset.go new file mode 100644 index 000000000000..e2cbf490811b --- /dev/null +++ 
b/provider/terraform/resources/resource_bigquery_dataset.go @@ -0,0 +1,432 @@ +package google + +import ( + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/bigquery/v2" +) + +const datasetIdRegexp = `[0-9A-Za-z_]+` + +func resourceBigQueryDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryDatasetCreate, + Read: resourceBigQueryDatasetRead, + Update: resourceBigQueryDatasetUpdate, + Delete: resourceBigQueryDatasetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + // DatasetId: [Required] A unique ID for this dataset, without the + // project name. The ID must contain only letters (a-z, A-Z), numbers + // (0-9), or underscores (_). The maximum length is 1,024 characters. + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(datasetIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) + } + + if len(value) > 1024 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 1,024 characters", k)) + } + + return + }, + }, + + // ProjectId: [Optional] The ID of the project containing this dataset. + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + // FriendlyName: [Optional] A descriptive name for the dataset. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + }, + + // Description: [Optional] A user-friendly description of the dataset. + "description": { + Type: schema.TypeString, + Optional: true, + }, + + // Location: [Experimental] The geographic location where the dataset + // should reside. 
+ "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "US", + ValidateFunc: validation.StringInSlice([]string{"US", "EU", "asia-northeast1"}, false), + }, + + // DefaultTableExpirationMs: [Optional] The default lifetime of all + // tables in the dataset, in milliseconds. The minimum value is 3600000 + // milliseconds (one hour). Once this property is set, all newly-created + // tables in the dataset will have an expirationTime property set to the + // creation time plus the value in this property, and changing the value + // will only affect new tables, not existing ones. When the + // expirationTime for a given table is reached, that table will be + // deleted automatically. If a table's expirationTime is modified or + // removed before the table expires, or if you provide an explicit + // expirationTime when creating a table, that value takes precedence + // over the default expiration time indicated by this property. + "default_table_expiration_ms": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 3600000 { + errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) + } + + return + }, + }, + + // Labels: [Experimental] The labels associated with this dataset. You + // can use these to organize and group your datasets. You can set this + // property when inserting or updating a dataset. + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + // Access: [Optional] An array of objects that define dataset access + // for one or more entities. You can set this property when inserting + // or updating a dataset in order to control who is allowed to access + // the data. 
+ "access": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + // Computed because if unset, BQ adds 4 entries automatically + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"OWNER", "WRITER", "READER"}, false), + }, + "domain": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "group_by_email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "special_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "user_by_email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "view": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + }, + "dataset_id": { + Type: schema.TypeString, + Required: true, + }, + "table_id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + // SelfLink: [Output-only] A URL that can be used to access the resource + // again. You can use this URL in Get or Update requests to the + // resource. + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + // Etag: [Output-only] A hash of the resource. + "etag": { + Type: schema.TypeString, + Computed: true, + }, + + // CreationTime: [Output-only] The time when this dataset was created, + // in milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // LastModifiedTime: [Output-only] The date when this dataset or any of + // its tables was last modified, in milliseconds since the epoch. 
+ "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + dataset := &bigquery.Dataset{ + DatasetReference: &bigquery.DatasetReference{ + DatasetId: d.Get("dataset_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("friendly_name"); ok { + dataset.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + dataset.Description = v.(string) + } + + if v, ok := d.GetOk("location"); ok { + dataset.Location = v.(string) + } + + if v, ok := d.GetOk("default_table_expiration_ms"); ok { + dataset.DefaultTableExpirationMs = int64(v.(int)) + } + + if v, ok := d.GetOk("labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + dataset.Labels = labels + } + + if v, ok := d.GetOk("access"); ok { + access := []*bigquery.DatasetAccess{} + for _, m := range v.([]interface{}) { + da := bigquery.DatasetAccess{} + accessMap := m.(map[string]interface{}) + da.Role = accessMap["role"].(string) + if val, ok := accessMap["domain"]; ok { + da.Domain = val.(string) + } + if val, ok := accessMap["group_by_email"]; ok { + da.GroupByEmail = val.(string) + } + if val, ok := accessMap["special_group"]; ok { + da.SpecialGroup = val.(string) + } + if val, ok := accessMap["user_by_email"]; ok { + da.UserByEmail = val.(string) + } + if val, ok := accessMap["view"]; ok { + views := val.([]interface{}) + if len(views) > 0 { + vm := views[0].(map[string]interface{}) + if len(vm) > 0 { + view := bigquery.TableReference{} + if dsId, ok := vm["dataset_id"]; ok { + view.DatasetId = dsId.(string) + } + if pId, ok := vm["project_id"]; ok { + view.ProjectId = pId.(string) + } + if tId, ok := vm["table_id"]; ok { + view.TableId = tId.(string) + } + da.View = &view + } + } 
+ } + access = append(access, &da) + } + dataset.Access = access + } + + return dataset, nil +} + +func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId) + + res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery dataset %s has been created", res.Id) + + d.SetId(res.Id) + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id()) + + id, err := parseBigQueryDatasetId(d.Id()) + if err != nil { + return err + } + + res, err := config.clientBigQuery.Datasets.Get(id.Project, id.DatasetId).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("BigQuery dataset %q", id.DatasetId)) + } + + d.Set("project", id.Project) + d.Set("etag", res.Etag) + d.Set("labels", res.Labels) + if err := d.Set("access", flattenAccess(res.Access)); err != nil { + return err + } + d.Set("self_link", res.SelfLink) + d.Set("description", res.Description) + d.Set("friendly_name", res.FriendlyName) + d.Set("creation_time", res.CreationTime) + d.Set("last_modified_time", res.LastModifiedTime) + d.Set("dataset_id", res.DatasetReference.DatasetId) + d.Set("default_table_expiration_ms", res.DefaultTableExpirationMs) + + // Older Tables in BigQuery have no Location set in the API response. This may be an issue when importing + // tables created before BigQuery was available in multiple zones. We can safely assume that these tables + // are in the US, as this was the default at the time. 
+ if res.Location == "" { + d.Set("location", "US") + } else { + d.Set("location", res.Location) + } + + return nil +} + +func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id()) + + id, err := parseBigQueryDatasetId(d.Id()) + if err != nil { + return err + } + + if _, err = config.clientBigQuery.Datasets.Update(id.Project, id.DatasetId, dataset).Do(); err != nil { + return err + } + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id()) + + id, err := parseBigQueryDatasetId(d.Id()) + if err != nil { + return err + } + + if err := config.clientBigQuery.Datasets.Delete(id.Project, id.DatasetId).Do(); err != nil { + return err + } + + d.SetId("") + return nil +} + +type bigQueryDatasetId struct { + Project, DatasetId string +} + +func parseBigQueryDatasetId(id string) (*bigQueryDatasetId, error) { + pd := fmt.Sprintf("(%s):(%s)", ProjectRegex, datasetIdRegexp) + re := regexp.MustCompile(pd) + if parts := re.FindStringSubmatch(id); parts != nil { + return &bigQueryDatasetId{ + Project: parts[1], + DatasetId: parts[2], + }, nil + } + + return nil, fmt.Errorf("Invalid BigQuery dataset specifier. 
 Expecting {project}:{dataset-id}, got %s", id)
}

// flattenAccess converts API dataset access entries into the schema's list
// form; a nil View is simply omitted from the entry.
func flattenAccess(a []*bigquery.DatasetAccess) []map[string]interface{} {
	access := make([]map[string]interface{}, 0, len(a))
	for _, da := range a {
		ai := map[string]interface{}{
			"role":           da.Role,
			"domain":         da.Domain,
			"group_by_email": da.GroupByEmail,
			"special_group":  da.SpecialGroup,
			"user_by_email":  da.UserByEmail,
		}
		if da.View != nil {
			view := []map[string]interface{}{{
				"project_id": da.View.ProjectId,
				"dataset_id": da.View.DatasetId,
				"table_id":   da.View.TableId,
			},
			}
			ai["view"] = view
		}
		access = append(access, ai)
	}
	return access
}
diff --git a/provider/terraform/resources/resource_bigquery_table.go b/provider/terraform/resources/resource_bigquery_table.go new file mode 100644 index 000000000000..19dfc7c98000 --- /dev/null +++ b/provider/terraform/resources/resource_bigquery_table.go @@ -0,0 +1,491 @@
package google

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/structure"
	"github.com/hashicorp/terraform/helper/validation"
	"google.golang.org/api/bigquery/v2"
)

// resourceBigQueryTable defines the google_bigquery_table resource: schema
// plus CRUD entry points.
func resourceBigQueryTable() *schema.Resource {
	return &schema.Resource{
		Create: resourceBigQueryTableCreate,
		Read:   resourceBigQueryTableRead,
		Delete: resourceBigQueryTableDelete,
		Update: resourceBigQueryTableUpdate,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// TableId: [Required] The ID of the table. The ID must contain only
			// letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
			// length is 1,024 characters.
			"table_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// DatasetId: [Required] The ID of the dataset containing this table.
			"dataset_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// ProjectId: [Required] The ID of the project containing this table.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Description: [Optional] A user-friendly description of this table.
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},

			// ExpirationTime: [Optional] The time when this table expires, in
			// milliseconds since the epoch. If not present, the table will persist
			// indefinitely. Expired tables will be deleted and their storage
			// reclaimed.
			"expiration_time": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			// FriendlyName: [Optional] A descriptive name for this table.
			"friendly_name": {
				Type:     schema.TypeString,
				Optional: true,
			},

			// Labels: [Experimental] The labels associated with this table. You can
			// use these to organize and group your tables. Label keys and values
			// can be no longer than 63 characters, can only contain lowercase
			// letters, numeric characters, underscores and dashes. International
			// characters are allowed. Label values are optional. Label keys must
			// start with a letter and each label in the list must have a different
			// key.
			"labels": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			// Schema: [Optional] Describes the schema of this table.
			// Stored in state as a normalized JSON string.
			"schema": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validation.ValidateJsonString,
				StateFunc: func(v interface{}) string {
					json, _ := structure.NormalizeJsonString(v)
					return json
				},
			},

			// View: [Optional] If specified, configures this table as a view.
			"view": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// Query: [Required] A query that BigQuery executes when the view is
						// referenced.
						"query": {
							Type:     schema.TypeString,
							Required: true,
						},

						// UseLegacySQL: [Optional] Specifies whether to use BigQuery's
						// legacy SQL for this view. The default value is true. If set to
						// false, the view will use BigQuery's standard SQL:
						"use_legacy_sql": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
						},
					},
				},
			},

			// TimePartitioning: [Experimental] If specified, configures time-based
			// partitioning for this table.
			"time_partitioning": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// ExpirationMs: [Optional] Number of milliseconds for which to keep the
						// storage for a partition.
						"expiration_ms": {
							Type:     schema.TypeInt,
							Optional: true,
						},

						// Type: [Required] The only type supported is DAY, which will generate
						// one partition per day based on data loading time.
						"type": {
							Type:         schema.TypeString,
							Required:     true,
							ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
						},

						// Type: [Optional] The field used to determine how to create a time-based
						// partition. If time-based partitioning is enabled without this value, the
						// table is partitioned based on the load time.
						"field": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
					},
				},
			},

			// CreationTime: [Output-only] The time when this table was created, in
			// milliseconds since the epoch.
			"creation_time": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// Etag: [Output-only] A hash of this resource.
			"etag": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// LastModifiedTime: [Output-only] The time when this table was last
			// modified, in milliseconds since the epoch.
			"last_modified_time": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// Location: [Output-only] The geographic location where the table
			// resides. This value is inherited from the dataset.
			"location": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// NumBytes: [Output-only] The size of this table in bytes, excluding
			// any data in the streaming buffer.
			"num_bytes": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// NumLongTermBytes: [Output-only] The number of bytes in the table that
			// are considered "long-term storage".
			"num_long_term_bytes": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// NumRows: [Output-only] The number of rows of data in this table,
			// excluding any data in the streaming buffer.
			"num_rows": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// SelfLink: [Output-only] A URL that can be used to access this
			// resource again.
			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// Type: [Output-only] Describes the table type. The following values
			// are supported: TABLE: A normal BigQuery table. VIEW: A virtual table
			// defined by a SQL query. EXTERNAL: A table that references data stored
			// in an external storage system, such as Google Cloud Storage. The
			// default value is TABLE.
			"type": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

// resourceTable builds a bigquery.Table from resource config. The project
// falls back to the provider default when not set on the resource.
func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return nil, err
	}

	table := &bigquery.Table{
		TableReference: &bigquery.TableReference{
			DatasetId: d.Get("dataset_id").(string),
			TableId:   d.Get("table_id").(string),
			ProjectId: project,
		},
	}

	if v, ok := d.GetOk("view"); ok {
		table.View = expandView(v)
	}

	if v, ok := d.GetOk("description"); ok {
		table.Description = v.(string)
	}

	if v, ok := d.GetOk("expiration_time"); ok {
		table.ExpirationTime = int64(v.(int))
	}

	if v, ok := d.GetOk("friendly_name"); ok {
		table.FriendlyName = v.(string)
	}

	if v, ok := d.GetOk("labels"); ok {
		labels := map[string]string{}

		for k, v := range v.(map[string]interface{}) {
			labels[k] = v.(string)
		}

		table.Labels = labels
	}

	if v, ok := d.GetOk("schema"); ok {
		// The schema attribute is a JSON string; parse it into field structs.
		schema, err := expandSchema(v)
		if err != nil {
			return nil, err
		}

		table.Schema = schema
	}

	if v, ok := d.GetOk("time_partitioning"); ok {
		table.TimePartitioning = expandTimePartitioning(v)
	}

	return table, nil
}

// resourceBigQueryTableCreate inserts the table and records the composite id
// {project}:{dataset_id}.{table_id} in state.
func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	table, err := resourceTable(d, meta)
	if err != nil {
		return err
	}

	datasetID := d.Get("dataset_id").(string)

	log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId)

	res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do()
	if err != nil {
		return err
	}

	log.Printf("[INFO] BigQuery table %s has been created", res.Id)

	d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId))

	return resourceBigQueryTableRead(d, meta)
}

// resourceBigQueryTableRead refreshes state from the API; a 404 removes the
// resource from state via handleNotFoundError.
func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	log.Printf("[INFO] Reading BigQuery table: %s", d.Id())

	id, err := parseBigQueryTableId(d.Id())
	if err != nil {
		return err
	}

	res, err := config.clientBigQuery.Tables.Get(id.Project, id.DatasetId, id.TableId).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", id.TableId))
	}

	d.Set("project", id.Project)
	d.Set("description", res.Description)
	d.Set("expiration_time", res.ExpirationTime)
	d.Set("friendly_name", res.FriendlyName)
	d.Set("labels", res.Labels)
	d.Set("creation_time", res.CreationTime)
	d.Set("etag", res.Etag)
	d.Set("last_modified_time", res.LastModifiedTime)
	d.Set("location", res.Location)
	d.Set("num_bytes", res.NumBytes)
	d.Set("table_id", res.TableReference.TableId)
	d.Set("dataset_id", res.TableReference.DatasetId)
	d.Set("num_long_term_bytes", res.NumLongTermBytes)
	d.Set("num_rows", res.NumRows)
	d.Set("self_link", res.SelfLink)
	d.Set("type", res.Type)

	if res.TimePartitioning != nil {
		if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil {
			return err
		}
	}

	if res.Schema != nil {
		schema, err := flattenSchema(res.Schema)
		if err != nil {
			return err
		}

		d.Set("schema", schema)
	}

	if res.View != nil {
		view := flattenView(res.View)
		d.Set("view", view)
	}

	return nil
}

// resourceBigQueryTableUpdate overwrites the table with the config-derived
// representation, then re-reads state.
func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	table, err := resourceTable(d, meta)
	if err != nil {
		return err
	}

	log.Printf("[INFO] Updating BigQuery table: %s", d.Id())

	id, err := parseBigQueryTableId(d.Id())
	if err != nil {
		return err
	}

	if _, err = config.clientBigQuery.Tables.Update(id.Project, id.DatasetId, id.TableId, table).Do(); err != nil {
		return err
	}

	return resourceBigQueryTableRead(d, meta)
}
// resourceBigQueryTableDelete deletes the table and clears the id from state.
func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	log.Printf("[INFO] Deleting BigQuery table: %s", d.Id())

	id, err := parseBigQueryTableId(d.Id())
	if err != nil {
		return err
	}

	if err := config.clientBigQuery.Tables.Delete(id.Project, id.DatasetId, id.TableId).Do(); err != nil {
		return err
	}

	d.SetId("")

	return nil
}

// expandSchema parses the JSON schema string into BigQuery field structs.
func expandSchema(raw interface{}) (*bigquery.TableSchema, error) {
	var fields []*bigquery.TableFieldSchema

	if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil {
		return nil, err
	}

	return &bigquery.TableSchema{Fields: fields}, nil
}

// flattenSchema serializes the table's fields back to the JSON string form
// stored in state.
func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) {
	schema, err := json.Marshal(tableSchema.Fields)
	if err != nil {
		return "", err
	}

	return string(schema), nil
}

// expandTimePartitioning converts the single-element time_partitioning block
// into the API struct.
func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning {
	raw := configured.([]interface{})[0].(map[string]interface{})
	tp := &bigquery.TimePartitioning{Type: raw["type"].(string)}

	// NOTE(review): for schema-backed maps these `ok` checks are always true;
	// unset "field"/"expiration_ms" arrive as zero values ("" / 0), which are
	// harmlessly assigned — confirm this is the intended behavior.
	if v, ok := raw["field"]; ok {
		tp.Field = v.(string)
	}

	if v, ok := raw["expiration_ms"]; ok {
		tp.ExpirationMs = int64(v.(int))
	}

	return tp
}

// flattenTimePartitioning converts the API struct back to the one-element
// list form used in state; zero-valued field/expiration_ms are omitted.
func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} {
	result := map[string]interface{}{"type": tp.Type}

	if tp.Field != "" {
		result["field"] = tp.Field
	}

	if tp.ExpirationMs != 0 {
		result["expiration_ms"] = tp.ExpirationMs
	}

	return []map[string]interface{}{result}
}

// expandView converts the single-element view block into the API struct.
func expandView(configured interface{}) *bigquery.ViewDefinition {
	raw := configured.([]interface{})[0].(map[string]interface{})
	vd := &bigquery.ViewDefinition{Query: raw["query"].(string)}

	if v, ok := raw["use_legacy_sql"]; ok {
		vd.UseLegacySql = v.(bool)
		// Force-send so an explicit false still reaches the API.
		vd.ForceSendFields = append(vd.ForceSendFields, "UseLegacySql")
	}

	return vd
}

// flattenView converts the API view definition back to the one-element list
// form used in state.
func flattenView(vd *bigquery.ViewDefinition) []map[string]interface{} {
	result := map[string]interface{}{"query": vd.Query}
	result["use_legacy_sql"] = vd.UseLegacySql

	return []map[string]interface{}{result}
}

// bigQueryTableId is the parsed form of a {project}:{dataset-id}.{table-id}
// state id.
type bigQueryTableId struct {
	Project, DatasetId, TableId string
}

// parseBigQueryTableId splits a state id of the form
// {project}:{dataset-id}.{table-id} into its components.
func parseBigQueryTableId(id string) (*bigQueryTableId, error) {
	parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' })

	if len(parts) != 3 {
		return nil, fmt.Errorf("Invalid BigQuery table specifier. Expecting {project}:{dataset-id}.{table-id}, got %s", id)
	}

	return &bigQueryTableId{
		Project:   parts[0],
		DatasetId: parts[1],
		TableId:   parts[2],
	}, nil
}
diff --git a/provider/terraform/resources/resource_bigtable_instance.go b/provider/terraform/resources/resource_bigtable_instance.go new file mode 100644 index 000000000000..d910d6d8663a --- /dev/null +++ b/provider/terraform/resources/resource_bigtable_instance.go @@ -0,0 +1,356 @@
package google

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/terraform/helper/customdiff"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"

	"cloud.google.com/go/bigtable"
)

// resourceBigtableInstance defines the google_bigtable_instance resource.
// The deprecated top-level cluster fields coexist with the "cluster" block;
// the CustomizeDiff decides when a change forces replacement.
func resourceBigtableInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceBigtableInstanceCreate,
		Read:   resourceBigtableInstanceRead,
		// TODO: Update is only needed because we're doing forcenew in customizediff
		// when we're done with the deprecation, we can drop customizediff and make cluster forcenew
		Update: schema.Noop,
		Delete: resourceBigtableInstanceDestroy,
		CustomizeDiff: customdiff.All(
			resourceBigTableInstanceClusterCustomizeDiff,
		),

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"cluster_id": {
				Type:          schema.TypeString,
				Optional:      true,
				Deprecated:    "Use cluster instead.",
				ConflictsWith: []string{"cluster"},
			},

			"cluster": {
				Type:     schema.TypeSet,
				Optional: true,
MaxItems: 1, + ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Optional: true, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "num_nodes": { + Type: schema.TypeInt, + Optional: true, + }, + "storage_type": { + Type: schema.TypeString, + Optional: true, + Default: "SSD", + ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false), + }, + }, + }, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Deprecated: "Use cluster instead.", + ConflictsWith: []string{"cluster"}, + }, + + "display_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "num_nodes": { + Type: schema.TypeInt, + Optional: true, + Deprecated: "Use cluster instead.", + ConflictsWith: []string{"cluster"}, + }, + + "instance_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "PRODUCTION", + ValidateFunc: validation.StringInSlice([]string{"DEVELOPMENT", "PRODUCTION"}, false), + }, + + "storage_type": { + Type: schema.TypeString, + Optional: true, + Default: "SSD", + ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false), + Deprecated: "Use cluster instead.", + ConflictsWith: []string{"cluster"}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error { + if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 { + return fmt.Errorf("At least one cluster must be set.") + } + if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") && + !d.HasChange("storage_type") && !d.HasChange("cluster") { + return nil + } + if d.Get("cluster.#").(int) == 1 { + // if we have exactly one 
cluster, and it has the same values as the old top-level + // values, we can assume the user is trying to go from the deprecated values to the + // new values, and we shouldn't ForceNew. We know that the top-level values aren't + // set, because they ConflictWith cluster. + oldID, _ := d.GetChange("cluster_id") + oldNodes, _ := d.GetChange("num_nodes") + oldZone, _ := d.GetChange("zone") + oldStorageType, _ := d.GetChange("storage_type") + new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{}) + + if oldID.(string) == new["cluster_id"].(string) && + oldNodes.(int) == new["num_nodes"].(int) && + oldZone.(string) == new["zone"].(string) && + oldStorageType.(string) == new["storage_type"].(string) { + return nil + } + } + if d.HasChange("cluster_id") { + d.ForceNew("cluster_id") + } + if d.HasChange("cluster") { + d.ForceNew("cluster") + } + if d.HasChange("zone") { + d.ForceNew("zone") + } + if d.HasChange("num_nodes") { + d.ForceNew("num_nodes") + } + if d.HasChange("storage_type") { + d.ForceNew("storage_type") + } + return nil +} + +func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + ctx := context.Background() + + project, err := getProject(d, config) + if err != nil { + return err + } + + conf := &bigtable.InstanceWithClustersConfig{ + InstanceID: d.Get("name").(string), + } + + displayName, ok := d.GetOk("display_name") + if !ok { + displayName = conf.InstanceID + } + conf.DisplayName = displayName.(string) + + switch d.Get("instance_type").(string) { + case "DEVELOPMENT": + conf.InstanceType = bigtable.DEVELOPMENT + case "PRODUCTION": + conf.InstanceType = bigtable.PRODUCTION + } + + if d.Get("cluster.#").(int) > 0 { + // expand cluster + conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone) + if err != nil { + return fmt.Errorf("error expanding clusters: %s", err.Error()) + } + } else { + // TODO: remove this when we're done 
with the deprecation period + zone, err := getZone(d, config) + if err != nil { + return err + } + cluster := bigtable.ClusterConfig{ + InstanceID: conf.InstanceID, + NumNodes: int32(d.Get("num_nodes").(int)), + Zone: zone, + ClusterID: d.Get("cluster_id").(string), + } + switch d.Get("storage_type").(string) { + case "HDD": + cluster.StorageType = bigtable.HDD + case "SSD": + cluster.StorageType = bigtable.SSD + } + conf.Clusters = append(conf.Clusters, cluster) + } + + c, err := config.bigtableClientFactory.NewInstanceAdminClient(project) + if err != nil { + return fmt.Errorf("Error starting instance admin client. %s", err) + } + + defer c.Close() + + err = c.CreateInstanceWithClusters(ctx, conf) + if err != nil { + return fmt.Errorf("Error creating instance. %s", err) + } + + d.SetId(conf.InstanceID) + + return resourceBigtableInstanceRead(d, meta) +} + +func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + ctx := context.Background() + + project, err := getProject(d, config) + if err != nil { + return err + } + + c, err := config.bigtableClientFactory.NewInstanceAdminClient(project) + if err != nil { + return fmt.Errorf("Error starting instance admin client. %s", err) + } + + defer c.Close() + + instance, err := c.InstanceInfo(ctx, d.Id()) + if err != nil { + log.Printf("[WARN] Removing %s because it's gone", d.Id()) + d.SetId("") + return fmt.Errorf("Error retrieving instance. Could not find %s. 
%s", d.Id(), err) + } + + d.Set("project", project) + if d.Get("cluster.#").(int) > 0 { + clusters := d.Get("cluster").(*schema.Set).List() + clusterState := []map[string]interface{}{} + for _, cl := range clusters { + cluster := cl.(map[string]interface{}) + clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string)) + if err != nil { + if isGoogleApiErrorWithCode(err, 404) { + log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string)) + continue + } + return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error()) + } + clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string))) + } + err = d.Set("cluster", clusterState) + if err != nil { + return fmt.Errorf("Error setting clusters in state: %s", err.Error()) + } + d.Set("cluster_id", "") + d.Set("zone", "") + d.Set("num_nodes", 0) + d.Set("storage_type", "SSD") + } else { + // TODO remove this when we're done with our deprecation period + zone, err := getZone(d, config) + if err != nil { + return err + } + d.Set("zone", zone) + } + d.Set("name", instance.Name) + d.Set("display_name", instance.DisplayName) + + return nil +} + +func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + ctx := context.Background() + + project, err := getProject(d, config) + if err != nil { + return err + } + + c, err := config.bigtableClientFactory.NewInstanceAdminClient(project) + if err != nil { + return fmt.Errorf("Error starting instance admin client. %s", err) + } + + defer c.Close() + + name := d.Id() + err = c.DeleteInstance(ctx, name) + if err != nil { + return fmt.Errorf("Error deleting instance. 
%s", err) + } + + d.SetId("") + + return nil +} + +func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} { + return map[string]interface{}{ + "zone": c.Zone, + "num_nodes": c.ServeNodes, + "cluster_id": c.Name, + "storage_type": storageType, + } +} + +func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig { + results := make([]bigtable.ClusterConfig, 0, len(clusters)) + for _, c := range clusters { + cluster := c.(map[string]interface{}) + zone := defaultZone + if confZone, ok := cluster["zone"]; ok { + zone = confZone.(string) + } + var storageType bigtable.StorageType + switch cluster["storage_type"].(string) { + case "SSD": + storageType = bigtable.SSD + case "HDD": + storageType = bigtable.HDD + } + results = append(results, bigtable.ClusterConfig{ + InstanceID: instanceID, + Zone: zone, + ClusterID: cluster["cluster_id"].(string), + NumNodes: int32(cluster["num_nodes"].(int)), + StorageType: storageType, + }) + } + return results +} diff --git a/provider/terraform/resources/resource_bigtable_table.go b/provider/terraform/resources/resource_bigtable_table.go new file mode 100644 index 000000000000..4afeefddae3d --- /dev/null +++ b/provider/terraform/resources/resource_bigtable_table.go @@ -0,0 +1,171 @@ +package google + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceBigtableTable() *schema.Resource { + return &schema.Resource{ + Create: resourceBigtableTableCreate, + Read: resourceBigtableTableRead, + Delete: resourceBigtableTableDestroy, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "column_family": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "family": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "instance_name": { 
+ Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "split_keys": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + ctx := context.Background() + + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceName := d.Get("instance_name").(string) + c, err := config.bigtableClientFactory.NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Get("name").(string) + if v, ok := d.GetOk("split_keys"); ok { + splitKeys := convertStringArr(v.([]interface{})) + // This method may return before the table's creation is complete - we may need to wait until + // it exists in the future. + err = c.CreatePresplitTable(ctx, name, splitKeys) + if err != nil { + return fmt.Errorf("Error creating presplit table. %s", err) + } + } else { + // This method may return before the table's creation is complete - we may need to wait until + // it exists in the future. + err = c.CreateTable(ctx, name) + if err != nil { + return fmt.Errorf("Error creating table. %s", err) + } + } + + if d.Get("column_family.#").(int) > 0 { + columns := d.Get("column_family").(*schema.Set).List() + + for _, co := range columns { + column := co.(map[string]interface{}) + + if v, ok := column["family"]; ok { + if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { + return fmt.Errorf("Error creating column family %s. 
%s", v, err) + } + } + } + } + + d.SetId(name) + + return resourceBigtableTableRead(d, meta) +} + +func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + ctx := context.Background() + + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceName := d.Get("instance_name").(string) + c, err := config.bigtableClientFactory.NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Id() + _, err = c.TableInfo(ctx, name) + if err != nil { + log.Printf("[WARN] Removing %s because it's gone", name) + d.SetId("") + return fmt.Errorf("Error retrieving table. Could not find %s in %s. %s", name, instanceName, err) + } + + d.Set("project", project) + + return nil +} + +func resourceBigtableTableDestroy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + ctx := context.Background() + + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceName := d.Get("instance_name").(string) + c, err := config.bigtableClientFactory.NewAdminClient(project, instanceName) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + name := d.Get("name").(string) + err = c.DeleteTable(ctx, name) + if err != nil { + return fmt.Errorf("Error deleting table. 
%s", err) + } + + d.SetId("") + + return nil +} diff --git a/provider/terraform/resources/resource_cloudbuild_build_trigger.go b/provider/terraform/resources/resource_cloudbuild_build_trigger.go new file mode 100644 index 000000000000..8009a687e0c8 --- /dev/null +++ b/provider/terraform/resources/resource_cloudbuild_build_trigger.go @@ -0,0 +1,336 @@ +// Package google - implement CRUD operations for Container Registry Build Triggers +// https://cloud.google.com/container-builder/docs/api/reference/rest/v1/projects.triggers#BuildTrigger +package google + +import ( + "encoding/json" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudbuild/v1" +) + +func resourceCloudBuildTrigger() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudbuildBuildTriggerCreate, + Read: resourceCloudbuildBuildTriggerRead, + Delete: resourceCloudbuildBuildTriggerDelete, + Importer: &schema.ResourceImporter{ + State: resourceCloudBuildTriggerImportState, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(3 * time.Minute), + }, + + SchemaVersion: 1, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "filename": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"build"}, + }, + "build": { + Type: schema.TypeList, + Description: "Contents of the build template.", + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "images": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "step": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "args": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "substitutions": &schema.Schema{ + Optional: true, + Type: schema.TypeMap, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "trigger_template": &schema.Schema{ + Optional: true, + Type: schema.TypeList, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "branch_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "commit_sha": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "dir": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "repo_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "tag_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func resourceCloudbuildBuildTriggerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the address parameter + buildTrigger := &cloudbuild.BuildTrigger{} + + if v, ok := d.GetOk("description"); ok { + buildTrigger.Description = v.(string) + } + + if v, ok := d.GetOk("filename"); ok { + buildTrigger.Filename = v.(string) + } else { + buildTrigger.Build = expandCloudbuildBuildTriggerBuild(d) + } + + buildTrigger.TriggerTemplate = expandCloudbuildBuildTriggerTemplate(d, project) + 
buildTrigger.Substitutions = expandStringMap(d, "substitutions") + + tstr, err := json.Marshal(buildTrigger) + if err != nil { + return err + } + log.Printf("[INFO] build trigger request: %s", string(tstr)) + trigger, err := config.clientBuild.Projects.Triggers.Create(project, buildTrigger).Do() + if err != nil { + return fmt.Errorf("Error creating build trigger: %s", err) + } + + d.SetId(trigger.Id) + + return resourceCloudbuildBuildTriggerRead(d, meta) +} + +func resourceCloudbuildBuildTriggerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + ID := d.Id() + buildTrigger, err := config.clientBuild.Projects.Triggers.Get(project, ID).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Cloudbuild Trigger %q", ID)) + } + + d.Set("description", buildTrigger.Description) + d.Set("substitutions", buildTrigger.Substitutions) + + if buildTrigger.TriggerTemplate != nil { + d.Set("trigger_template", flattenCloudbuildBuildTriggerTemplate(d, config, buildTrigger.TriggerTemplate)) + } + + if buildTrigger.Filename != "" { + d.Set("filename", buildTrigger.Filename) + } else if buildTrigger.Build != nil { + d.Set("build", flattenCloudbuildBuildTriggerBuild(d, config, buildTrigger.Build)) + } + + return nil +} + +func expandCloudbuildBuildTriggerTemplate(d *schema.ResourceData, project string) *cloudbuild.RepoSource { + if d.Get("trigger_template.#").(int) == 0 { + return nil + } + tmpl := &cloudbuild.RepoSource{} + if v, ok := d.GetOk("trigger_template.0.project"); ok { + tmpl.ProjectId = v.(string) + } else { + tmpl.ProjectId = project + } + if v, ok := d.GetOk("trigger_template.0.branch_name"); ok { + tmpl.BranchName = v.(string) + } + if v, ok := d.GetOk("trigger_template.0.commit_sha"); ok { + tmpl.CommitSha = v.(string) + } + if v, ok := d.GetOk("trigger_template.0.dir"); ok { + tmpl.Dir = v.(string) + } + if v, ok := 
d.GetOk("trigger_template.0.repo_name"); ok { + tmpl.RepoName = v.(string) + } + if v, ok := d.GetOk("trigger_template.0.tag_name"); ok { + tmpl.TagName = v.(string) + } + return tmpl +} + +func flattenCloudbuildBuildTriggerTemplate(d *schema.ResourceData, config *Config, t *cloudbuild.RepoSource) []map[string]interface{} { + flattened := make([]map[string]interface{}, 1) + + flattened[0] = map[string]interface{}{ + "branch_name": t.BranchName, + "commit_sha": t.CommitSha, + "dir": t.Dir, + "project": t.ProjectId, + "repo_name": t.RepoName, + "tag_name": t.TagName, + } + + return flattened +} + +func expandCloudbuildBuildTriggerBuild(d *schema.ResourceData) *cloudbuild.Build { + if d.Get("build.#").(int) == 0 { + return nil + } + + build := &cloudbuild.Build{} + if v, ok := d.GetOk("build.0.images"); ok { + build.Images = convertStringArr(v.([]interface{})) + } + if v, ok := d.GetOk("build.0.tags"); ok { + build.Tags = convertStringArr(v.([]interface{})) + } + stepCount := d.Get("build.0.step.#").(int) + build.Steps = make([]*cloudbuild.BuildStep, 0, stepCount) + for s := 0; s < stepCount; s++ { + step := &cloudbuild.BuildStep{ + Name: d.Get(fmt.Sprintf("build.0.step.%d.name", s)).(string), + } + if v, ok := d.GetOk(fmt.Sprintf("build.0.step.%d.args", s)); ok { + step.Args = strings.Split(v.(string), " ") + } + build.Steps = append(build.Steps, step) + } + return build +} + +func flattenCloudbuildBuildTriggerBuild(d *schema.ResourceData, config *Config, b *cloudbuild.Build) []map[string]interface{} { + flattened := make([]map[string]interface{}, 1) + + flattened[0] = map[string]interface{}{} + + if b.Images != nil { + flattened[0]["images"] = convertStringArrToInterface(b.Images) + } + if b.Tags != nil { + flattened[0]["tags"] = convertStringArrToInterface(b.Tags) + } + if b.Steps != nil { + steps := make([]map[string]interface{}, len(b.Steps)) + for i, step := range b.Steps { + steps[i] = map[string]interface{}{} + steps[i]["name"] = step.Name + steps[i]["args"] = 
strings.Join(step.Args, " ") + } + flattened[0]["step"] = steps + } + + return flattened +} + +func resourceCloudbuildBuildTriggerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the build trigger + log.Printf("[DEBUG] build trigger delete request") + _, err = config.clientBuild.Projects.Triggers.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting build trigger: %s", err) + } + + d.SetId("") + return nil +} + +func resourceCloudBuildTriggerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + + if len(parts) == 1 { + return []*schema.ResourceData{d}, nil + } else if len(parts) == 2 { + d.Set("project", parts[0]) + d.SetId(parts[1]) + return []*schema.ResourceData{d}, nil + } else { + return nil, fmt.Errorf("Invalid import id %q. Expecting {trigger_name} or {project}/{trigger_name}", d.Id()) + } +} diff --git a/provider/terraform/resources/resource_cloudfunctions_function.go b/provider/terraform/resources/resource_cloudfunctions_function.go new file mode 100644 index 000000000000..41c877664a31 --- /dev/null +++ b/provider/terraform/resources/resource_cloudfunctions_function.go @@ -0,0 +1,650 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/cloudfunctions/v1" + + "fmt" + "log" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +// Min is 1 second, max is 9 minutes 540 sec +const functionTimeOutMax = 540 +const functionTimeOutMin = 1 +const functionDefaultTimeout = 60 + +var functionAllowedMemory = map[int]bool{ + 128: true, + 256: true, + 512: true, + 1024: true, + 2048: true, +} + +// For now CloudFunctions are allowed only in the following locations. 
+// Please see https://cloud.google.com/about/locations/ +var validCloudFunctionRegion = validation.StringInSlice([]string{"us-central1", "us-east1", "europe-west1", "asia-northeast1"}, true) + +const functionDefaultAllowedMemoryMb = 256 + +type cloudFunctionId struct { + Project string + Region string + Name string +} + +func (s *cloudFunctionId) cloudFunctionId() string { + return fmt.Sprintf("projects/%s/locations/%s/functions/%s", s.Project, s.Region, s.Name) +} + +func (s *cloudFunctionId) locationId() string { + return fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Region) +} + +func (s *cloudFunctionId) terraformId() string { + return fmt.Sprintf("%s/%s/%s", s.Project, s.Region, s.Name) +} + +func parseCloudFunctionId(id string, config *Config) (*cloudFunctionId, error) { + parts := strings.Split(id, "/") + + cloudFuncIdRegex := regexp.MustCompile("^([a-z0-9-]+)/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + + if cloudFuncIdRegex.MatchString(id) { + return &cloudFunctionId{ + Project: parts[0], + Region: parts[1], + Name: parts[2], + }, nil + } + + return nil, fmt.Errorf("Invalid CloudFunction id format, expecting " + + "`{projectId}/{regionId}/{cloudFunctionName}`") +} + +func joinMapKeys(mapToJoin *map[int]bool) string { + var keys []string + for key := range *mapToJoin { + keys = append(keys, strconv.Itoa(key)) + } + return strings.Join(keys, ",") +} + +func resourceCloudFunctionsFunction() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudFunctionsCreate, + Read: resourceCloudFunctionsRead, + Update: resourceCloudFunctionsUpdate, + Delete: resourceCloudFunctionsDestroy, + CustomizeDiff: resourceCloudFunctionsCustomizeDiff, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Read: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + 
"name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 48 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 48 characters", k)) + } + if !regexp.MustCompile("^[a-zA-Z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-zA-Z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-zA-Z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "source_archive_bucket": { + Type: schema.TypeString, + Required: true, + }, + + "source_archive_object": { + Type: schema.TypeString, + Required: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "available_memory_mb": { + Type: schema.TypeInt, + Optional: true, + Default: functionDefaultAllowedMemoryMb, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + availableMemoryMB := v.(int) + + if functionAllowedMemory[availableMemoryMB] != true { + errors = append(errors, fmt.Errorf("Allowed values for memory (in MB) are: %s . Got %d", + joinMapKeys(&functionAllowedMemory), availableMemoryMB)) + } + return + }, + }, + + "timeout": { + Type: schema.TypeInt, + Optional: true, + Default: functionDefaultTimeout, + ValidateFunc: validation.IntBetween(functionTimeOutMin, functionTimeOutMax), + }, + + "entry_point": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + }, + + "environment_variables": { + Type: schema.TypeMap, + Optional: true, + }, + + "trigger_bucket": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "This field is deprecated. 
Use `event_trigger` instead.", + ConflictsWith: []string{"trigger_http", "trigger_topic"}, + }, + + "trigger_http": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"trigger_bucket", "trigger_topic"}, + }, + + "trigger_topic": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "This field is deprecated. Use `event_trigger` instead.", + ConflictsWith: []string{"trigger_http", "trigger_bucket"}, + }, + + "event_trigger": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ConflictsWith: []string{"trigger_http", "retry_on_failure", "trigger_topic", "trigger_http"}, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event_type": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "resource": { + Type: schema.TypeString, + Required: true, + }, + "failure_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retry": { + Type: schema.TypeBool, + // not strictly required, but this way an empty block can't be specified + Required: true, + }, + }}, + }, + }, + }, + }, + + "https_trigger_url": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "retry_on_failure": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Deprecated: "This field is deprecated. 
Use `event_trigger.failure_policy.retry` instead.", + ConflictsWith: []string{"trigger_http"}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validCloudFunctionRegion, + }, + }, + } +} + +func resourceCloudFunctionsCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { + if diff.HasChange("trigger_topic") { + _, n := diff.GetChange("trigger_topic") + if n == "" { + diff.Clear("trigger_topic") + } else { + diff.ForceNew("trigger_topic") + } + } + + if diff.HasChange("trigger_bucket") { + _, n := diff.GetChange("trigger_bucket") + if n == "" { + diff.Clear("trigger_bucket") + } else { + diff.ForceNew("trigger_bucket") + } + } + + return nil +} + +func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + // We do this extra validation here since most regions are not valid, and the + // error message that Cloud Functions has for "wrong region" is not specific. + // Provider-level region fetching skips validation, because it's not possible + // for the provider-level region to know about the field-level validator. 
+ _, errs := validCloudFunctionRegion(region, "region") + if len(errs) > 0 { + return errs[0] + } + + cloudFuncId := &cloudFunctionId{ + Project: project, + Region: region, + Name: d.Get("name").(string), + } + + function := &cloudfunctions.CloudFunction{ + Name: cloudFuncId.cloudFunctionId(), + ForceSendFields: []string{}, + } + + sourceArchiveBucket := d.Get("source_archive_bucket").(string) + sourceArchiveObj := d.Get("source_archive_object").(string) + function.SourceArchiveUrl = fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) + + if v, ok := d.GetOk("available_memory_mb"); ok { + availableMemoryMb := v.(int) + function.AvailableMemoryMb = int64(availableMemoryMb) + } + + if v, ok := d.GetOk("description"); ok { + function.Description = v.(string) + } + + if v, ok := d.GetOk("entry_point"); ok { + function.EntryPoint = v.(string) + } + + if v, ok := d.GetOk("timeout"); ok { + function.Timeout = fmt.Sprintf("%vs", v.(int)) + } + + if v, ok := d.GetOk("event_trigger"); ok { + function.EventTrigger = expandEventTrigger(v.([]interface{}), project) + } else if v, ok := d.GetOk("trigger_http"); ok && v.(bool) { + function.HttpsTrigger = &cloudfunctions.HttpsTrigger{} + } else if v, ok := d.GetOk("trigger_topic"); ok { + // Make PubSub event publish as in https://cloud.google.com/functions/docs/calling/pubsub + function.EventTrigger = &cloudfunctions.EventTrigger{ + // Other events are not supported + EventType: "google.pubsub.topic.publish", + // Must be like projects/PROJECT_ID/topics/NAME + // Topic must be in same project as function + Resource: fmt.Sprintf("projects/%s/topics/%s", project, v.(string)), + } + if d.Get("retry_on_failure").(bool) { + function.EventTrigger.FailurePolicy = &cloudfunctions.FailurePolicy{ + Retry: &cloudfunctions.Retry{}, + } + } + } else if v, ok := d.GetOk("trigger_bucket"); ok { + // Make Storage event as in https://cloud.google.com/functions/docs/calling/storage + function.EventTrigger = 
&cloudfunctions.EventTrigger{ + EventType: "providers/cloud.storage/eventTypes/object.change", + // Must be like projects/PROJECT_ID/buckets/NAME + // Bucket must be in same project as function + Resource: fmt.Sprintf("projects/%s/buckets/%s", project, v.(string)), + } + if d.Get("retry_on_failure").(bool) { + function.EventTrigger.FailurePolicy = &cloudfunctions.FailurePolicy{ + Retry: &cloudfunctions.Retry{}, + } + } + } else { + return fmt.Errorf("One of `event_trigger` or `trigger_http` is required: " + + "You must specify a trigger when deploying a new function.") + } + + if _, ok := d.GetOk("labels"); ok { + function.Labels = expandLabels(d) + } + + if _, ok := d.GetOk("environment_variables"); ok { + function.EnvironmentVariables = expandEnvironmentVariables(d) + } + + log.Printf("[DEBUG] Creating cloud function: %s", function.Name) + op, err := config.clientCloudFunctions.Projects.Locations.Functions.Create( + cloudFuncId.locationId(), function).Do() + if err != nil { + return err + } + + // Name of function should be unique + d.SetId(cloudFuncId.terraformId()) + + err = cloudFunctionsOperationWait(config.clientCloudFunctions, op, "Creating CloudFunctions Function") + if err != nil { + return err + } + + return resourceCloudFunctionsRead(d, meta) +} + +func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + cloudFuncId, err := parseCloudFunctionId(d.Id(), config) + if err != nil { + return err + } + + function, err := config.clientCloudFunctions.Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) + } + + d.Set("name", cloudFuncId.Name) + d.Set("description", function.Description) + d.Set("entry_point", function.EntryPoint) + d.Set("available_memory_mb", function.AvailableMemoryMb) + sRemoved := strings.Replace(function.Timeout, "s", "", -1) + timeout, err := 
strconv.Atoi(sRemoved)
	if err != nil {
		return err
	}
	d.Set("timeout", timeout)
	d.Set("labels", function.Labels)
	d.Set("environment_variables", function.EnvironmentVariables)
	if function.SourceArchiveUrl != "" {
		// sourceArchiveUrl should always be a Google Cloud Storage URL (e.g. gs://bucket/object)
		// https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions
		sourceURL, err := url.Parse(function.SourceArchiveUrl)
		if err != nil {
			return err
		}
		bucket := sourceURL.Host
		object := strings.TrimLeft(sourceURL.Path, "/")
		d.Set("source_archive_bucket", bucket)
		d.Set("source_archive_object", object)
	}

	if function.HttpsTrigger != nil {
		d.Set("trigger_http", true)
		d.Set("https_trigger_url", function.HttpsTrigger.Url)
	}

	d.Set("event_trigger", flattenEventTrigger(function.EventTrigger))
	if function.EventTrigger != nil {
		// Only mirror into the deprecated fields when the user's config is
		// actually using them, so event_trigger-only configs stay clean.
		switch function.EventTrigger.EventType {
		// From https://github.com/google/google-api-go-client/blob/master/cloudfunctions/v1/cloudfunctions-gen.go#L335
		case "google.pubsub.topic.publish":
			if _, ok := d.GetOk("trigger_topic"); ok {
				d.Set("trigger_topic", GetResourceNameFromSelfLink(function.EventTrigger.Resource))
			}
		case "providers/cloud.storage/eventTypes/object.change":
			if _, ok := d.GetOk("trigger_bucket"); ok {
				d.Set("trigger_bucket", GetResourceNameFromSelfLink(function.EventTrigger.Resource))
			}
		}

		if _, ok := d.GetOk("retry_on_failure"); ok {
			retry := function.EventTrigger.FailurePolicy != nil && function.EventTrigger.FailurePolicy.Retry != nil
			d.Set("retry_on_failure", retry)
		}
	}
	d.Set("region", cloudFuncId.Region)
	d.Set("project", cloudFuncId.Project)

	return nil
}

// resourceCloudFunctionsUpdate patches only the changed fields, accumulating
// the API updateMask (camelCase field paths) as it goes.
func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[DEBUG]: Updating google_cloudfunctions_function")
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	cloudFuncId, err := parseCloudFunctionId(d.Id(), config)
	if err != nil {
		return err
	}

	d.Partial(true)

	function := cloudfunctions.CloudFunction{
		Name: cloudFuncId.cloudFunctionId(),
	}

	var updateMaskArr []string
	if d.HasChange("available_memory_mb") {
		availableMemoryMb := d.Get("available_memory_mb").(int)
		function.AvailableMemoryMb = int64(availableMemoryMb)
		updateMaskArr = append(updateMaskArr, "availableMemoryMb")
	}

	if d.HasChange("source_archive_bucket") || d.HasChange("source_archive_object") {
		sourceArchiveBucket := d.Get("source_archive_bucket").(string)
		sourceArchiveObj := d.Get("source_archive_object").(string)
		function.SourceArchiveUrl = fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj)
		updateMaskArr = append(updateMaskArr, "sourceArchiveUrl")
	}

	if d.HasChange("description") {
		function.Description = d.Get("description").(string)
		updateMaskArr = append(updateMaskArr, "description")
	}

	if d.HasChange("timeout") {
		function.Timeout = fmt.Sprintf("%vs", d.Get("timeout").(int))
		updateMaskArr = append(updateMaskArr, "timeout")
	}

	if d.HasChange("labels") {
		function.Labels = expandLabels(d)
		updateMaskArr = append(updateMaskArr, "labels")
	}

	if d.HasChange("environment_variables") {
		function.EnvironmentVariables = expandEnvironmentVariables(d)
		// BUGFIX: updateMask entries are camelCase API field paths (cf.
		// "availableMemoryMb", "sourceArchiveUrl" above); the previous
		// snake_case "environment_variables" is not a valid mask path.
		updateMaskArr = append(updateMaskArr, "environmentVariables")
	}

	// Event trigger will run after failure policy and take precedence
	if d.HasChange("retry_on_failure") {
		if d.Get("retry_on_failure").(bool) {
			if function.EventTrigger == nil {
				function.EventTrigger = &cloudfunctions.EventTrigger{}
			}
			function.EventTrigger.FailurePolicy = &cloudfunctions.FailurePolicy{
				Retry: &cloudfunctions.Retry{},
			}
		}
		updateMaskArr = append(updateMaskArr, "eventTrigger.failurePolicy.retry")
	}

	if d.HasChange("event_trigger") {
		function.EventTrigger = expandEventTrigger(d.Get("event_trigger").([]interface{}), project)
		updateMaskArr = append(updateMaskArr, "eventTrigger", "eventTrigger.failurePolicy.retry")
	}

	if len(updateMaskArr) > 0 {
		log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function)
		updateMask := strings.Join(updateMaskArr, ",")
		op, err := config.clientCloudFunctions.Projects.Locations.Functions.Patch(function.Name, &function).
			UpdateMask(updateMask).Do()

		if err != nil {
			return fmt.Errorf("Error while updating cloudfunction configuration: %s", err)
		}

		err = cloudFunctionsOperationWait(config.clientCloudFunctions, op,
			"Updating CloudFunctions Function")
		if err != nil {
			return err
		}
	}
	d.Partial(false)

	return resourceCloudFunctionsRead(d, meta)
}

// resourceCloudFunctionsDestroy deletes the function and waits for the
// operation to complete before clearing the ID.
func resourceCloudFunctionsDestroy(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	cloudFuncId, err := parseCloudFunctionId(d.Id(), config)
	if err != nil {
		return err
	}

	op, err := config.clientCloudFunctions.Projects.Locations.Functions.Delete(cloudFuncId.cloudFunctionId()).Do()
	if err != nil {
		return err
	}
	err = cloudFunctionsOperationWait(config.clientCloudFunctions, op, "Deleting CloudFunctions Function")
	if err != nil {
		return err
	}

	d.SetId("")

	return nil
}

// expandEventTrigger converts the `event_trigger` block into an API
// EventTrigger, expanding the short resource name into a full resource path
// based on the event type's provider.
// NOTE(review): an event_type matching neither prefix leaves `shape` empty,
// producing a malformed Resource via Sprintf — TODO confirm whether unknown
// event types should fall through to the raw resource string.
func expandEventTrigger(configured []interface{}, project string) *cloudfunctions.EventTrigger {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	data := configured[0].(map[string]interface{})
	eventType := data["event_type"].(string)
	shape := ""
	switch {
	case strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"):
		shape = "projects/%s/buckets/%s"
	case strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"):
		shape = "projects/%s/topics/%s"
	}

	return &cloudfunctions.EventTrigger{
		EventType:     eventType,
		Resource:      fmt.Sprintf(shape, project, data["resource"].(string)),
		FailurePolicy: expandFailurePolicy(data["failure_policy"].([]interface{})),
	}
}

func
flattenEventTrigger(eventTrigger *cloudfunctions.EventTrigger) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + if eventTrigger == nil { + return result + } + + result = append(result, map[string]interface{}{ + "event_type": eventTrigger.EventType, + "resource": GetResourceNameFromSelfLink(eventTrigger.Resource), + "failure_policy": flattenFailurePolicy(eventTrigger.FailurePolicy), + }) + + return result +} + +func expandFailurePolicy(configured []interface{}) *cloudfunctions.FailurePolicy { + if len(configured) == 0 || configured[0] == nil { + return &cloudfunctions.FailurePolicy{} + } + + if data := configured[0].(map[string]interface{}); data["retry"].(bool) { + return &cloudfunctions.FailurePolicy{ + Retry: &cloudfunctions.Retry{}, + } + } + + return nil +} + +func flattenFailurePolicy(failurePolicy *cloudfunctions.FailurePolicy) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + if failurePolicy == nil { + return nil + } + + result = append(result, map[string]interface{}{ + "retry": failurePolicy.Retry != nil, + }) + + return result +} diff --git a/provider/terraform/resources/resource_cloudiot_registry.go b/provider/terraform/resources/resource_cloudiot_registry.go new file mode 100644 index 000000000000..a7aa0d066bf4 --- /dev/null +++ b/provider/terraform/resources/resource_cloudiot_registry.go @@ -0,0 +1,374 @@ +package google + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/cloudiot/v1" +) + +const ( + mqttEnabled = "MQTT_ENABLED" + mqttDisabled = "MQTT_DISABLED" + httpEnabled = "HTTP_ENABLED" + httpDisabled = "HTTP_DISABLED" + x509CertificatePEM = "X509_CERTIFICATE_PEM" +) + +func resourceCloudIoTRegistry() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudIoTRegistryCreate, + Update: resourceCloudIoTRegistryUpdate, + Read: resourceCloudIoTRegistryRead, + 
Delete: resourceCloudIoTRegistryDelete,

		Importer: &schema.ResourceImporter{
			State: resourceCloudIoTRegistryStateImporter,
		},

		Schema: map[string]*schema.Schema{
			// Short registry ID; becomes the last path segment of the full
			// resource name.
			"name": &schema.Schema{
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateCloudIoTID,
			},
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			// Pub/Sub topic that receives device telemetry events.
			"event_notification_config": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"pubsub_topic_name": &schema.Schema{
							Type:             schema.TypeString,
							Required:         true,
							DiffSuppressFunc: compareSelfLinkOrResourceName,
						},
					},
				},
			},
			// Pub/Sub topic that receives device state updates.
			"state_notification_config": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"pubsub_topic_name": &schema.Schema{
							Type:             schema.TypeString,
							Required:         true,
							DiffSuppressFunc: compareSelfLinkOrResourceName,
						},
					},
				},
			},
			"mqtt_config": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"mqtt_enabled_state": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ValidateFunc: validation.StringInSlice(
								[]string{mqttEnabled, mqttDisabled}, false),
						},
					},
				},
			},
			"http_config": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"http_enabled_state": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ValidateFunc: validation.StringInSlice(
								[]string{httpEnabled, httpDisabled}, false),
						},
					},
				},
			},
			// Up to 10 CA certificates used to verify device credentials.
			"credentials": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 10,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"public_key_certificate": &schema.Schema{
							Type:     schema.TypeMap,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"format": &schema.Schema{
										Type:     schema.TypeString,
										Required: true,
										ValidateFunc: validation.StringInSlice(
											[]string{x509CertificatePEM}, false),
									},
									"certificate": &schema.Schema{
										Type:     schema.TypeString,
										Required: true,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

// buildEventNotificationConfig maps a single schema config block onto the
// API's EventNotificationConfig; returns nil when the key is absent.
func buildEventNotificationConfig(config map[string]interface{}) *cloudiot.EventNotificationConfig {
	if v, ok := config["pubsub_topic_name"]; ok {
		return &cloudiot.EventNotificationConfig{
			PubsubTopicName: v.(string),
		}
	}
	return nil
}

// buildStateNotificationConfig maps a single schema config block onto the
// API's StateNotificationConfig; returns nil when the key is absent.
func buildStateNotificationConfig(config map[string]interface{}) *cloudiot.StateNotificationConfig {
	if v, ok := config["pubsub_topic_name"]; ok {
		return &cloudiot.StateNotificationConfig{
			PubsubTopicName: v.(string),
		}
	}
	return nil
}

// buildMqttConfig maps the mqtt_config block onto the API message.
func buildMqttConfig(config map[string]interface{}) *cloudiot.MqttConfig {
	if v, ok := config["mqtt_enabled_state"]; ok {
		return &cloudiot.MqttConfig{
			MqttEnabledState: v.(string),
		}
	}
	return nil
}

// buildHttpConfig maps the http_config block onto the API message.
func buildHttpConfig(config map[string]interface{}) *cloudiot.HttpConfig {
	if v, ok := config["http_enabled_state"]; ok {
		return &cloudiot.HttpConfig{
			HttpEnabledState: v.(string),
		}
	}
	return nil
}

// buildPublicKeyCertificate converts one public_key_certificate map into the
// API certificate message.
func buildPublicKeyCertificate(certificate map[string]interface{}) *cloudiot.PublicKeyCertificate {
	cert := &cloudiot.PublicKeyCertificate{
		Format:      certificate["format"].(string),
		Certificate: certificate["certificate"].(string),
	}
	return cert
}

// expandCredentials converts the "credentials" list into RegistryCredential
// messages, one per configured certificate.
func expandCredentials(credentials []interface{}) []*cloudiot.RegistryCredential {
	certificates := make([]*cloudiot.RegistryCredential, len(credentials))
	for i, raw := range credentials {
		cred := raw.(map[string]interface{})
		certificates[i] = &cloudiot.RegistryCredential{
			PublicKeyCertificate: buildPublicKeyCertificate(cred["public_key_certificate"].(map[string]interface{})),
		}
	}
return certificates +} + +func createDeviceRegistry(d *schema.ResourceData) *cloudiot.DeviceRegistry { + deviceRegistry := &cloudiot.DeviceRegistry{} + if v, ok := d.GetOk("event_notification_config"); ok { + deviceRegistry.EventNotificationConfigs = make([]*cloudiot.EventNotificationConfig, 1, 1) + deviceRegistry.EventNotificationConfigs[0] = buildEventNotificationConfig(v.(map[string]interface{})) + } + if v, ok := d.GetOk("state_notification_config"); ok { + deviceRegistry.StateNotificationConfig = buildStateNotificationConfig(v.(map[string]interface{})) + } + if v, ok := d.GetOk("mqtt_config"); ok { + deviceRegistry.MqttConfig = buildMqttConfig(v.(map[string]interface{})) + } + if v, ok := d.GetOk("http_config"); ok { + deviceRegistry.HttpConfig = buildHttpConfig(v.(map[string]interface{})) + } + if v, ok := d.GetOk("credentials"); ok { + deviceRegistry.Credentials = expandCredentials(v.([]interface{})) + } + return deviceRegistry +} + +func resourceCloudIoTRegistryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + deviceRegistry := createDeviceRegistry(d) + deviceRegistry.Id = d.Get("name").(string) + parent := fmt.Sprintf("projects/%s/locations/%s", project, region) + registryId := fmt.Sprintf("%s/registries/%s", parent, deviceRegistry.Id) + d.SetId(registryId) + + err = retryTime(func() error { + _, err := config.clientCloudIoT.Projects.Locations.Registries.Create(parent, deviceRegistry).Do() + return err + }, 5) + if err != nil { + d.SetId("") + return err + } + return resourceCloudIoTRegistryRead(d, meta) +} + +func resourceCloudIoTRegistryUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updateMask := make([]string, 0, 5) + hasChanged := false + deviceRegistry := &cloudiot.DeviceRegistry{} + + d.Partial(true) + + if 
d.HasChange("event_notification_config") {
		hasChanged = true
		updateMask = append(updateMask, "event_notification_configs")
		if v, ok := d.GetOk("event_notification_config"); ok {
			deviceRegistry.EventNotificationConfigs = make([]*cloudiot.EventNotificationConfig, 1, 1)
			deviceRegistry.EventNotificationConfigs[0] = buildEventNotificationConfig(v.(map[string]interface{}))
		}
	}
	if d.HasChange("state_notification_config") {
		hasChanged = true
		updateMask = append(updateMask, "state_notification_config.pubsub_topic_name")
		if v, ok := d.GetOk("state_notification_config"); ok {
			deviceRegistry.StateNotificationConfig = buildStateNotificationConfig(v.(map[string]interface{}))
		}
	}
	if d.HasChange("mqtt_config") {
		hasChanged = true
		updateMask = append(updateMask, "mqtt_config.mqtt_enabled_state")
		if v, ok := d.GetOk("mqtt_config"); ok {
			deviceRegistry.MqttConfig = buildMqttConfig(v.(map[string]interface{}))
		}
	}
	if d.HasChange("http_config") {
		hasChanged = true
		updateMask = append(updateMask, "http_config.http_enabled_state")
		if v, ok := d.GetOk("http_config"); ok {
			deviceRegistry.HttpConfig = buildHttpConfig(v.(map[string]interface{}))
		}
	}
	if d.HasChange("credentials") {
		hasChanged = true
		updateMask = append(updateMask, "credentials")
		if v, ok := d.GetOk("credentials"); ok {
			deviceRegistry.Credentials = expandCredentials(v.([]interface{}))
		}
	}
	if hasChanged {
		_, err := config.clientCloudIoT.Projects.Locations.Registries.Patch(d.Id(),
			deviceRegistry).UpdateMask(strings.Join(updateMask, ",")).Do()
		if err != nil {
			return fmt.Errorf("Error updating registry %s: %s", d.Get("name").(string), err)
		}
		// NOTE(review): SetPartial expects top-level schema keys, but these
		// values are API field-mask paths (e.g. "mqtt_config.mqtt_enabled_state",
		// "event_notification_configs") — confirm the intended partial-state
		// behavior.
		for _, updateMaskItem := range updateMask {
			d.SetPartial(updateMaskItem)
		}
	}
	d.Partial(false)
	return resourceCloudIoTRegistryRead(d, meta)
}

// resourceCloudIoTRegistryRead refreshes local state from the live registry.
func resourceCloudIoTRegistryRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	name := d.Id()
	res, err :=
config.clientCloudIoT.Projects.Locations.Registries.Get(name).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Registry %q", name))
	}

	d.Set("name", res.Id)

	if len(res.EventNotificationConfigs) > 0 {
		eventConfig := map[string]string{"pubsub_topic_name": res.EventNotificationConfigs[0].PubsubTopicName}
		d.Set("event_notification_config", eventConfig)
	} else {
		d.Set("event_notification_config", nil)
	}

	// BUG FIX: guard against nil sub-messages. The previous code dereferenced
	// res.StateNotificationConfig / res.MqttConfig / res.HttpConfig directly
	// and panicked when the API omitted an unconfigured block.
	pubsubTopicName := ""
	if res.StateNotificationConfig != nil {
		pubsubTopicName = res.StateNotificationConfig.PubsubTopicName
	}
	if pubsubTopicName != "" {
		d.Set("state_notification_config",
			map[string]string{"pubsub_topic_name": pubsubTopicName})
	} else {
		d.Set("state_notification_config", nil)
	}

	// If no config exists for mqtt or http config, default values are omitted;
	// only store the state when it differs from the default or the user set
	// the block explicitly.
	mqttState := mqttEnabled
	if res.MqttConfig != nil {
		mqttState = res.MqttConfig.MqttEnabledState
	}
	_, hasMqttConfig := d.GetOk("mqtt_config")
	if mqttState != mqttEnabled || hasMqttConfig {
		d.Set("mqtt_config",
			map[string]string{"mqtt_enabled_state": mqttState})
	}
	httpState := httpEnabled
	if res.HttpConfig != nil {
		httpState = res.HttpConfig.HttpEnabledState
	}
	_, hasHttpConfig := d.GetOk("http_config")
	if httpState != httpEnabled || hasHttpConfig {
		d.Set("http_config",
			map[string]string{"http_enabled_state": httpState})
	}

	credentials := make([]map[string]interface{}, len(res.Credentials))
	for i, item := range res.Credentials {
		pubcert := make(map[string]interface{})
		pubcert["format"] = item.PublicKeyCertificate.Format
		pubcert["certificate"] = item.PublicKeyCertificate.Certificate
		credentials[i] = make(map[string]interface{})
		credentials[i]["public_key_certificate"] = pubcert
	}
	d.Set("credentials", credentials)
	return nil
}

// resourceCloudIoTRegistryDelete deletes the registry and clears the ID from
// state on success.
func resourceCloudIoTRegistryDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	name := d.Id()
	call := config.clientCloudIoT.Projects.Locations.Registries.Delete(name)
	_, err := call.Do()
	if err != nil {
		return err
	}
	d.SetId("")
	return nil
}

func resourceCloudIoTRegistryStateImporter(d
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + r, _ := regexp.Compile("^projects/(.+)/locations/(.+)/registries/(.+)$") + if !r.MatchString(d.Id()) { + return nil, fmt.Errorf("Invalid registry specifier. " + + "Expecting: projects/{project}/locations/{region}/registries/{name}") + } + parms := r.FindAllStringSubmatch(d.Id(), -1)[0] + project := parms[1] + region := parms[2] + name := parms[3] + + id := fmt.Sprintf("projects/%s/locations/%s/registries/%s", project, region, name) + d.Set("project", project) + d.Set("region", region) + d.SetId(id) + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_composer_environment.go b/provider/terraform/resources/resource_composer_environment.go new file mode 100644 index 000000000000..dc220fbe5bc5 --- /dev/null +++ b/provider/terraform/resources/resource_composer_environment.go @@ -0,0 +1,884 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/composer/v1" +) + +const ( + composerEnvironmentEnvVariablesRegexp = "[a-zA-Z_][a-zA-Z0-9_]*." 
	composerEnvironmentReservedAirflowEnvVarRegexp = "AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+"
)

// Environment variable names Composer reserves for itself; user config may
// not override them.
var composerEnvironmentReservedEnvVar = map[string]struct{}{
	"AIRFLOW_HOME":     {},
	"C_FORCE_ROOT":     {},
	"CONTAINER_NAME":   {},
	"DAGS_FOLDER":      {},
	"GCP_PROJECT":      {},
	"GCS_BUCKET":       {},
	"GKE_CLUSTER_NAME": {},
	"SQL_DATABASE":     {},
	"SQL_INSTANCE":     {},
	"SQL_PASSWORD":     {},
	"SQL_PROJECT":      {},
	"SQL_REGION":       {},
	"SQL_USER":         {},
}

func resourceComposerEnvironment() *schema.Resource {
	return &schema.Resource{
		Create: resourceComposerEnvironmentCreate,
		Read:   resourceComposerEnvironmentRead,
		Update: resourceComposerEnvironmentUpdate,
		Delete: resourceComposerEnvironmentDelete,

		Importer: &schema.ResourceImporter{
			State: resourceComposerEnvironmentImport,
		},

		Timeouts: &schema.ResourceTimeout{
			// Composer takes <= 1 hr for create/update.
			Create: schema.DefaultTimeout(3600 * time.Second),
			Update: schema.DefaultTimeout(3600 * time.Second),
			Delete: schema.DefaultTimeout(360 * time.Second),
		},

		Schema: map[string]*schema.Schema{
			"name": {
				Type:         schema.TypeString,
				Required:     true,
				ForceNew:     true,
				ValidateFunc: validateGCPName,
			},
			"region": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"config": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"node_count": {
							Type:         schema.TypeInt,
							Computed:     true,
							Optional:     true,
							ValidateFunc: validation.IntAtLeast(3),
						},
						// GKE node pool settings; all ForceNew since the
						// underlying cluster cannot be changed in place.
						"node_config": {
							Type:     schema.TypeList,
							Computed: true,
							Optional: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"zone": {
										Type:             schema.TypeString,
										Computed:         true,
										Optional:         true,
										ForceNew:         true,
										DiffSuppressFunc: compareSelfLinkOrResourceName,
									},
									"machine_type": {
										Type:             schema.TypeString,
										Computed:         true,
										Optional:         true,
										ForceNew:         true,
										DiffSuppressFunc: compareSelfLinkOrResourceName,
									},
									"network": {
										Type:             schema.TypeString,
										Computed:         true,
										Optional:         true,
										ForceNew:         true,
										DiffSuppressFunc: compareSelfLinkOrResourceName,
									},
									"subnetwork": {
										Type:             schema.TypeString,
										Optional:         true,
										ForceNew:         true,
										DiffSuppressFunc: compareSelfLinkOrResourceName,
									},
									"disk_size_gb": {
										Type:     schema.TypeInt,
										Computed: true,
										Optional: true,
										ForceNew: true,
									},
									"oauth_scopes": {
										Type:     schema.TypeSet,
										Computed: true,
										Optional: true,
										ForceNew: true,
										Elem: &schema.Schema{
											Type: schema.TypeString,
										},
										Set: schema.HashString,
									},
									"service_account": {
										Type:             schema.TypeString,
										Computed:         true,
										Optional:         true,
										ForceNew:         true,
										ValidateFunc:     validateServiceAccountRelativeNameOrEmail,
										DiffSuppressFunc: compareServiceAccountEmailToLink,
									},
									"tags": {
										Type:     schema.TypeSet,
										Optional: true,
										ForceNew: true,
										Elem: &schema.Schema{
											Type: schema.TypeString,
										},
										Set: schema.HashString,
									},
								},
							},
						},
						"software_config": {
							Type:     schema.TypeList,
							Optional: true,
							Computed: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"airflow_config_overrides": {
										Type:     schema.TypeMap,
										Optional: true,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
									"pypi_packages": {
										Type:         schema.TypeMap,
										Optional:     true,
										Elem:         &schema.Schema{Type: schema.TypeString},
										ValidateFunc: validateComposerEnvironmentPypiPackages,
									},
									"env_variables": {
										Type:         schema.TypeMap,
										Optional:     true,
										Elem:         &schema.Schema{Type: schema.TypeString},
										ValidateFunc: validateComposerEnvironmentEnvVariables,
									},
									"image_version": {
										Type:     schema.TypeString,
										Computed: true,
									},
								},
							},
						},
						// Output-only attributes populated by the service.
						"airflow_uri": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"dag_gcs_prefix": {
							Type:     schema.TypeString,
							Computed: true,
						},
						"gke_cluster": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}

// resourceComposerEnvironmentCreate creates the environment, waits for the
// long-running operation, and applies any update-only fields afterwards.
func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	envName, err := resourceComposerEnvironmentName(d, config)
	if err != nil {
		return err
	}

	transformedConfig, err := expandComposerEnvironmentConfig(d.Get("config"), d, config)
	if err != nil {
		return err
	}

	env := &composer.Environment{
		Name:   envName.resourceName(),
		Labels: expandLabels(d),
		Config: transformedConfig,
	}

	// Some fields cannot be specified during create and must be updated post-creation.
	updateOnlyEnv := getComposerEnvironmentPostCreateUpdateObj(env)

	log.Printf("[DEBUG] Creating new Environment %q", envName.parentName())
	op, err := config.clientComposer.Projects.Locations.Environments.Create(envName.parentName(), env).Do()
	if err != nil {
		return err
	}

	// Store the ID now
	id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	waitErr := composerOperationWaitTime(
		config.clientComposer, op, envName.Project, "Creating Environment",
		int(d.Timeout(schema.TimeoutCreate).Minutes()))

	if waitErr != nil {
		// The resource didn't actually get created, remove from state.
		d.SetId("")

		errMsg := fmt.Sprintf("Error waiting to create Environment: %s", waitErr)
		if err := handleComposerEnvironmentCreationOpFailure(id, envName, d, config); err != nil {
			return fmt.Errorf("Error waiting to create Environment: %s. An initial "+
				"environment was or is still being created, and clean up failed with "+
				"error: %s.", errMsg, err)
		}

		return fmt.Errorf("Error waiting to create Environment: %s", waitErr)
	}

	log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), op)

	if err := resourceComposerEnvironmentPostCreateUpdate(updateOnlyEnv, d, config); err != nil {
		return err
	}

	return resourceComposerEnvironmentRead(d, meta)
}

// resourceComposerEnvironmentRead refreshes local state from the live
// environment.
func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	envName, err := resourceComposerEnvironmentName(d, config)
	if err != nil {
		return err
	}

	res, err := config.clientComposer.Projects.Locations.Environments.Get(envName.resourceName()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("ComposerEnvironment %q", d.Id()))
	}

	// Set from getProject(d)
	if err := d.Set("project", envName.Project); err != nil {
		return fmt.Errorf("Error reading Environment: %s", err)
	}
	// Set from getRegion(d)
	if err := d.Set("region", envName.Region); err != nil {
		return fmt.Errorf("Error reading Environment: %s", err)
	}
	if err := d.Set("name", GetResourceNameFromSelfLink(res.Name)); err != nil {
		return fmt.Errorf("Error reading Environment: %s", err)
	}
	if err := d.Set("config", flattenComposerEnvironmentConfig(res.Config)); err != nil {
		return fmt.Errorf("Error reading Environment: %s", err)
	}
	if err := d.Set("labels", res.Labels); err != nil {
		return fmt.Errorf("Error reading Environment: %s", err)
	}
	return nil
}

func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error {
	tfConfig := meta.(*Config)

	d.Partial(true)

	// Composer only allows PATCHing one field at a time, so for each updatable field, we
	// 1. determine if it needs to be updated
	// 2. construct a PATCH object with only that field populated
	// 3.
call resourceComposerEnvironmentPatchField(...)to update that single field.
	if d.HasChange("config") {
		config, err := expandComposerEnvironmentConfig(d.Get("config"), d, tfConfig)
		if err != nil {
			return err
		}

		if d.HasChange("config.0.software_config.0.airflow_config_overrides") {

			patchObj := &composer.Environment{
				Config: &composer.EnvironmentConfig{
					SoftwareConfig: &composer.SoftwareConfig{
						AirflowConfigOverrides: make(map[string]string),
					},
				},
			}

			if config != nil && config.SoftwareConfig != nil && len(config.SoftwareConfig.AirflowConfigOverrides) > 0 {
				patchObj.Config.SoftwareConfig.AirflowConfigOverrides = config.SoftwareConfig.AirflowConfigOverrides
			}

			err = resourceComposerEnvironmentPatchField("config.softwareConfig.airflowConfigOverrides", patchObj, d, tfConfig)
			if err != nil {
				return err
			}
			d.SetPartial("config")
		}

		if d.HasChange("config.0.software_config.0.env_variables") {
			patchObj := &composer.Environment{
				Config: &composer.EnvironmentConfig{
					SoftwareConfig: &composer.SoftwareConfig{
						EnvVariables: make(map[string]string),
					},
				},
			}
			if config != nil && config.SoftwareConfig != nil && len(config.SoftwareConfig.EnvVariables) > 0 {
				patchObj.Config.SoftwareConfig.EnvVariables = config.SoftwareConfig.EnvVariables
			}

			err = resourceComposerEnvironmentPatchField("config.softwareConfig.envVariables", patchObj, d, tfConfig)
			if err != nil {
				return err
			}
			d.SetPartial("config")
		}

		if d.HasChange("config.0.software_config.0.pypi_packages") {
			patchObj := &composer.Environment{
				Config: &composer.EnvironmentConfig{
					SoftwareConfig: &composer.SoftwareConfig{
						PypiPackages: make(map[string]string),
					},
				},
			}
			if config != nil && config.SoftwareConfig != nil && config.SoftwareConfig.PypiPackages != nil {
				patchObj.Config.SoftwareConfig.PypiPackages = config.SoftwareConfig.PypiPackages
			}

			err = resourceComposerEnvironmentPatchField("config.softwareConfig.pypiPackages", patchObj, d, tfConfig)
			if err != nil {
				return err
			}
			d.SetPartial("config")
		}

		if d.HasChange("config.0.node_count") {
			patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}}
			if config != nil {
				patchObj.Config.NodeCount = config.NodeCount
			}
			err = resourceComposerEnvironmentPatchField("config.nodeCount", patchObj, d, tfConfig)
			if err != nil {
				return err
			}
			d.SetPartial("config")
		}
	}

	if d.HasChange("labels") {
		patchEnv := &composer.Environment{Labels: expandLabels(d)}
		err := resourceComposerEnvironmentPatchField("labels", patchEnv, d, tfConfig)
		if err != nil {
			return err
		}
		d.SetPartial("labels")
	}

	d.Partial(false)
	return resourceComposerEnvironmentRead(d, tfConfig)
}

// resourceComposerEnvironmentPostCreateUpdate applies fields (currently only
// PYPI packages) that the API rejects at create time and must be patched in
// afterwards.
func resourceComposerEnvironmentPostCreateUpdate(updateEnv *composer.Environment, d *schema.ResourceData, cfg *Config) error {
	if updateEnv == nil {
		return nil
	}

	d.Partial(true)

	if updateEnv.Config != nil && updateEnv.Config.SoftwareConfig != nil && len(updateEnv.Config.SoftwareConfig.PypiPackages) > 0 {
		log.Printf("[DEBUG] Running post-create update for Environment %q", d.Id())
		err := resourceComposerEnvironmentPatchField("config.softwareConfig.pypiPackages", updateEnv, d, cfg)
		if err != nil {
			return err
		}

		log.Printf("[DEBUG] Finish update to Environment %q post create for update only fields", d.Id())
		d.SetPartial("config")
	}
	d.Partial(false)
	return resourceComposerEnvironmentRead(d, cfg)
}

// resourceComposerEnvironmentPatchField issues a single-field PATCH and waits
// for the resulting operation.
func resourceComposerEnvironmentPatchField(updateMask string, env *composer.Environment, d *schema.ResourceData, config *Config) error {
	envJson, _ := env.MarshalJSON()
	log.Printf("[DEBUG] Updating Environment %q (updateMask = %q): %s", d.Id(), updateMask, string(envJson))
	envName, err := resourceComposerEnvironmentName(d, config)
	if err != nil {
		return err
	}

	op, err := config.clientComposer.Projects.Locations.Environments.
		Patch(envName.resourceName(), env).
+ UpdateMask(updateMask).Do() + if err != nil { + return err + } + + waitErr := composerOperationWaitTime( + config.clientComposer, op, envName.Project, "Updating newly created Environment", + int(d.Timeout(schema.TimeoutCreate).Minutes())) + if waitErr != nil { + // The resource didn't actually update. + return fmt.Errorf("Error waiting to update Environment: %s", waitErr) + } + + log.Printf("[DEBUG] Finished updating Environment %q (updateMask = %q)", d.Id(), updateMask) + return nil +} + +func resourceComposerEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + envName, err := resourceComposerEnvironmentName(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting Environment %q", d.Id()) + op, err := config.clientComposer.Projects.Locations.Environments.Delete(envName.resourceName()).Do() + if err != nil { + return err + } + + err = composerOperationWaitTime( + config.clientComposer, op, envName.Project, "Deleting Environment", + int(d.Timeout(schema.TimeoutDelete).Minutes())) + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), op) + return nil +} + +func resourceComposerEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{project}}/{{region}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComposerEnvironmentConfig(envCfg *composer.EnvironmentConfig) interface{} { + if envCfg == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["gke_cluster"] = envCfg.GkeCluster + transformed["dag_gcs_prefix"] = 
envCfg.DagGcsPrefix + transformed["node_count"] = envCfg.NodeCount + transformed["airflow_uri"] = envCfg.AirflowUri + transformed["node_config"] = flattenComposerEnvironmentConfigNodeConfig(envCfg.NodeConfig) + transformed["software_config"] = flattenComposerEnvironmentConfigSoftwareConfig(envCfg.SoftwareConfig) + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigNodeConfig(nodeCfg *composer.NodeConfig) interface{} { + if nodeCfg == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["zone"] = nodeCfg.Location + transformed["machine_type"] = nodeCfg.MachineType + transformed["network"] = nodeCfg.Network + transformed["subnetwork"] = nodeCfg.Subnetwork + transformed["disk_size_gb"] = nodeCfg.DiskSizeGb + transformed["service_account"] = nodeCfg.ServiceAccount + transformed["oauth_scopes"] = flattenComposerEnvironmentConfigNodeConfigOauthScopes(nodeCfg.OauthScopes) + transformed["tags"] = flattenComposerEnvironmentConfigNodeConfigTags(nodeCfg.Tags) + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigNodeConfigOauthScopes(v interface{}) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, convertStringArrToInterface(v.([]string))) +} + +func flattenComposerEnvironmentConfigNodeConfigTags(v interface{}) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, convertStringArrToInterface(v.([]string))) +} + +func flattenComposerEnvironmentConfigSoftwareConfig(softwareCfg *composer.SoftwareConfig) interface{} { + if softwareCfg == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["image_version"] = softwareCfg.ImageVersion + transformed["airflow_config_overrides"] = softwareCfg.AirflowConfigOverrides + transformed["pypi_packages"] = softwareCfg.PypiPackages + transformed["env_variables"] = softwareCfg.EnvVariables + return []interface{}{transformed} +} + +func 
expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.EnvironmentConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + original := l[0].(map[string]interface{}) + transformed := &composer.EnvironmentConfig{} + + if nodeCountRaw, ok := original["node_count"]; ok { + transformedNodeCount, err := expandComposerEnvironmentConfigNodeCount(nodeCountRaw, d, config) + if err != nil { + return nil, err + } + transformed.NodeCount = transformedNodeCount + } + + transformedNodeConfig, err := expandComposerEnvironmentConfigNodeConfig(original["node_config"], d, config) + if err != nil { + return nil, err + } + transformed.NodeConfig = transformedNodeConfig + + transformedSoftwareConfig, err := expandComposerEnvironmentConfigSoftwareConfig(original["software_config"], d, config) + if err != nil { + return nil, err + } + transformed.SoftwareConfig = transformedSoftwareConfig + return transformed, nil +} + +func expandComposerEnvironmentConfigNodeCount(v interface{}, d *schema.ResourceData, config *Config) (int64, error) { + if v == nil { + return 0, nil + } + return int64(v.(int)), nil +} + +func expandComposerEnvironmentConfigNodeConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.NodeConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.NodeConfig{} + + if transformedDiskSizeGb, ok := original["disk_size_gb"]; ok { + transformed.DiskSizeGb = int64(transformedDiskSizeGb.(int)) + } + + if v, ok := original["service_account"]; ok { + transformedServiceAccount, err := expandComposerEnvironmentServiceAccount(v, d, config) + if err != nil { + return nil, err + } + transformed.ServiceAccount = transformedServiceAccount + } + + var nodeConfigZone string + if v, ok := original["zone"]; ok { + transformedZone, err := expandComposerEnvironmentZone(v, d, config) + if err != nil { + 
return nil, err + } + transformed.Location = transformedZone + nodeConfigZone = transformedZone + } + + if v, ok := original["machine_type"]; ok { + transformedMachineType, err := expandComposerEnvironmentMachineType(v, d, config, nodeConfigZone) + if err != nil { + return nil, err + } + transformed.MachineType = transformedMachineType + } + + if v, ok := original["network"]; ok { + transformedNetwork, err := expandComposerEnvironmentNetwork(v, d, config) + if err != nil { + return nil, err + } + transformed.Network = transformedNetwork + } + + if v, ok := original["subnetwork"]; ok { + transformedSubnetwork, err := expandComposerEnvironmentSubnetwork(v, d, config) + if err != nil { + return nil, err + } + transformed.Subnetwork = transformedSubnetwork + } + + transformedOauthScopes, err := expandComposerEnvironmentSetList(original["oauth_scopes"], d, config) + if err != nil { + return nil, err + } + transformed.OauthScopes = transformedOauthScopes + + transformedTags, err := expandComposerEnvironmentSetList(original["tags"], d, config) + if err != nil { + return nil, err + } + transformed.Tags = transformedTags + return transformed, nil +} + +func expandComposerEnvironmentServiceAccount(v interface{}, d *schema.ResourceData, config *Config) (string, error) { + serviceAccount := v.(string) + if len(serviceAccount) == 0 { + return "", nil + } + + return GetResourceNameFromSelfLink(serviceAccount), nil +} + +func expandComposerEnvironmentZone(v interface{}, d *schema.ResourceData, config *Config) (string, error) { + zone := v.(string) + if len(zone) == 0 { + return zone, nil + } + if !strings.Contains(zone, "/") { + project, err := getProject(d, config) + if err != nil { + return "", err + } + return fmt.Sprintf("projects/%s/zones/%s", project, zone), nil + } + + return getRelativePath(zone) +} + +func expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, config *Config, nodeCfgZone interface{}) (string, error) { + fv, err := 
ParseMachineTypesFieldValue(v.(string), d, config) + if err != nil { + return "", nil + } + return fv.RelativeLink(), nil +} + +func expandComposerEnvironmentNetwork(v interface{}, d *schema.ResourceData, config *Config) (string, error) { + fv, err := ParseNetworkFieldValue(v.(string), d, config) + if err != nil { + return "", err + } + return fv.RelativeLink(), nil +} + +func expandComposerEnvironmentSubnetwork(v interface{}, d *schema.ResourceData, config *Config) (string, error) { + fv, err := ParseSubnetworkFieldValue(v.(string), d, config) + if err != nil { + return "", err + } + return fv.RelativeLink(), nil +} + +func expandComposerEnvironmentSetList(v interface{}, d *schema.ResourceData, config *Config) ([]string, error) { + if v == nil { + return nil, nil + } + return convertStringArr(v.(*schema.Set).List()), nil +} + +func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *schema.ResourceData, config *Config) (*composer.SoftwareConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.SoftwareConfig{} + + transformed.ImageVersion = original["image_version"].(string) + transformed.AirflowConfigOverrides = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "airflow_config_overrides") + transformed.PypiPackages = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "pypi_packages") + transformed.EnvVariables = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "env_variables") + return transformed, nil +} + +func expandComposerEnvironmentConfigSoftwareConfigStringMap(softwareConfig map[string]interface{}, k string) map[string]string { + v, ok := softwareConfig[k] + if ok && v != nil { + return convertStringMap(v.(map[string]interface{})) + } + return map[string]string{} +} + +func validateComposerEnvironmentPypiPackages(v interface{}, k string) (ws []string, errors []error) { + if v == nil { + 
return ws, errors + } + for pkgName := range v.(map[string]interface{}) { + if pkgName != strings.ToLower(pkgName) { + errors = append(errors, + fmt.Errorf("PYPI package %q can only contain lowercase characters", pkgName)) + } + } + + return ws, errors +} + +func validateComposerEnvironmentEnvVariables(v interface{}, k string) (ws []string, errors []error) { + if v == nil { + return ws, errors + } + + reEnvVarName := regexp.MustCompile(composerEnvironmentEnvVariablesRegexp) + reAirflowReserved := regexp.MustCompile(composerEnvironmentReservedAirflowEnvVarRegexp) + + for envVarName := range v.(map[string]interface{}) { + if !reEnvVarName.MatchString(envVarName) { + errors = append(errors, + fmt.Errorf("env_variable %q must match regexp %q", envVarName, composerEnvironmentEnvVariablesRegexp)) + } else if _, ok := composerEnvironmentReservedEnvVar[envVarName]; ok { + errors = append(errors, + fmt.Errorf("env_variable %q is a reserved name and cannot be used", envVarName)) + } else if reAirflowReserved.MatchString(envVarName) { + errors = append(errors, + fmt.Errorf("env_variable %q cannot match reserved Airflow variable names with regexp %q", + envVarName, composerEnvironmentReservedAirflowEnvVarRegexp)) + } + } + + return ws, errors +} + +func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvironmentName, d *schema.ResourceData, config *Config) error { + log.Printf("[WARNING] Creation operation for Composer Environment %q failed, check Environment isn't still running", id) + // Try to get possible created but invalid environment. + env, err := config.clientComposer.Projects.Locations.Environments.Get(envName.resourceName()).Do() + if err != nil { + // If error is 401, we don't have to clean up environment, return nil. + // Otherwise, we encountered another error. 
+ return handleNotFoundError(err, d, fmt.Sprintf("Composer Environment %q", envName.resourceName())) + } + + if env.State == "CREATING" { + return fmt.Errorf( + "Getting creation operation state failed while waiting for environment to finish creating, "+ + "but environment seems to still be in 'CREATING' state. Wait for operation to finish and either "+ + "manually delete environment or import %q into your state", id) + } + + log.Printf("[WARNING] Environment %q from failed creation operation was created, deleting.", id) + op, err := config.clientComposer.Projects.Locations.Environments.Delete(envName.resourceName()).Do() + if err != nil { + return fmt.Errorf("Could not delete the invalid created environment with state %q: %s", env.State, err) + } + + waitErr := composerOperationWaitTime( + config.clientComposer, op, envName.Project, + fmt.Sprintf("Deleting invalid created Environment with state %q", env.State), + int(d.Timeout(schema.TimeoutCreate).Minutes())) + if waitErr != nil { + return fmt.Errorf("Error waiting to delete invalid Environment with state %q: %s", env.State, waitErr) + } + + return nil +} + +func getComposerEnvironmentPostCreateUpdateObj(env *composer.Environment) (updateEnv *composer.Environment) { + // pypiPackages can only be added via update + if env != nil && env.Config != nil && env.Config.SoftwareConfig != nil { + if len(env.Config.SoftwareConfig.PypiPackages) > 0 { + updateEnv = &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{ + PypiPackages: env.Config.SoftwareConfig.PypiPackages, + }, + }, + } + // Clear PYPI packages - otherwise, API will return error + // that the create request is invalid. 
+ env.Config.SoftwareConfig.PypiPackages = make(map[string]string) + } + } + + return updateEnv +} + +func resourceComposerEnvironmentName(d *schema.ResourceData, config *Config) (*composerEnvironmentName, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + region, err := getRegion(d, config) + if err != nil { + return nil, err + } + + return &composerEnvironmentName{ + Project: project, + Region: region, + Environment: d.Get("name").(string), + }, nil +} + +type composerEnvironmentName struct { + Project string + Region string + Environment string +} + +func (n *composerEnvironmentName) resourceName() string { + return fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) +} + +func (n *composerEnvironmentName) parentName() string { + return fmt.Sprintf("projects/%s/locations/%s", n.Project, n.Region) +} + +// The value we store (i.e. `old` in this method), might be only the service account email, +// but we expect either the email or the name (projects/.../serviceAccounts/...) +func compareServiceAccountEmailToLink(_, old, new string, _ *schema.ResourceData) bool { + // old is the service account email returned from the server. 
+ if !strings.HasPrefix("projects/", old) { + return old == GetResourceNameFromSelfLink(new) + } + return compareSelfLinkRelativePaths("", old, new, nil) +} + +func validateServiceAccountRelativeNameOrEmail(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + serviceAccountRe := "(" + strings.Join(PossibleServiceAccountNames, "|") + ")" + if strings.HasPrefix(value, "projects/") { + serviceAccountRe = fmt.Sprintf("projects/(.+)/serviceAccounts/%s", serviceAccountRe) + } + r := regexp.MustCompile(serviceAccountRe) + if !r.MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, serviceAccountRe)) + } + + return +} diff --git a/provider/terraform/resources/resource_compute_attached_disk.go b/provider/terraform/resources/resource_compute_attached_disk.go new file mode 100644 index 000000000000..829ab484f0f2 --- /dev/null +++ b/provider/terraform/resources/resource_compute_attached_disk.go @@ -0,0 +1,216 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + compute "google.golang.org/api/compute/v1" +) + +func resourceComputeAttachedDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceAttachedDiskCreate, + Read: resourceAttachedDiskRead, + Delete: resourceAttachedDiskDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAttachedDiskImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(300 * time.Second), + Delete: schema.DefaultTimeout(300 * time.Second), + }, + + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + "project": { + Type: schema.TypeString, + ForceNew: 
true, + Computed: true, + Optional: true, + }, + "zone": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + }, + "device_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + "mode": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: "READ_WRITE", + ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false), + }, + }, + } +} + +func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + + diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) + + attachedDisk := compute.AttachedDisk{ + Source: fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName), + Mode: d.Get("mode").(string), + DeviceName: d.Get("device_name").(string), + } + + op, err := config.clientCompute.Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do() + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s:%s", zv.Name, diskName)) + + waitErr := computeSharedOperationWaitTime(config.clientCompute, op, zv.Project, + int(d.Timeout(schema.TimeoutCreate).Minutes()), "disk to attach") + if waitErr != nil { + d.SetId("") + return waitErr + } + + return resourceAttachedDiskRead(d, meta) +} + +func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + d.Set("project", zv.Project) + d.Set("zone", zv.Zone) + + diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) + + instance, err := config.clientCompute.Instances.Get(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return err + } + + // Iterate through the instance's attached 
disks as this is the only way to + // confirm the disk is actually attached + ad := findDiskByName(instance.Disks, diskName) + if ad == nil { + log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.") + d.SetId("") + return nil + } + + d.Set("device_name", ad.DeviceName) + d.Set("mode", ad.Mode) + + // Force the referenced resources to a self-link in state because it's more specific then name. + instancePath, err := getRelativePath(instance.SelfLink) + if err != nil { + return err + } + d.Set("instance", instancePath) + diskPath, err := getRelativePath(ad.Source) + if err != nil { + return err + } + d.Set("disk", diskPath) + + return nil +} + +func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + + diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) + + instance, err := config.clientCompute.Instances.Get(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return err + } + + // Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an attached + // disk on the compute instance then return as though the delete call succeed since this is the desired state. 
+ ad := findDiskByName(instance.Disks, diskName) + if ad == nil { + return nil + } + + op, err := config.clientCompute.Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do() + if err != nil { + return err + } + + waitErr := computeSharedOperationWaitTime(config.clientCompute, op, zv.Project, + int(d.Timeout(schema.TimeoutDelete).Minutes()), fmt.Sprintf("Detaching disk from %s", zv.Name)) + if waitErr != nil { + return waitErr + } + + return nil +} + +func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + err := parseImportId( + []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/[^/]+", + "(?P[^/]+)/(?P[^/]+)/[^/]+"}, d, config) + if err != nil { + return nil, err + } + + // In all acceptable id formats the actual id will be the last in the path + id := GetResourceNameFromSelfLink(d.Id()) + d.SetId(id) + + IDParts := strings.Split(d.Id(), ":") + if len(IDParts) != 2 { + return nil, fmt.Errorf("unable to determine attached disk id - id should be '{google_compute_instance.name}:{google_compute_disk.name}'") + } + d.Set("instance", IDParts[0]) + d.Set("disk", IDParts[1]) + + return []*schema.ResourceData{d}, nil +} + +func findDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk { + for _, disk := range disks { + if compareSelfLinkOrResourceName("", disk.Source, id, nil) { + return disk + } + } + + return nil +} diff --git a/provider/terraform/resources/resource_compute_backend_service.go b/provider/terraform/resources/resource_compute_backend_service.go new file mode 100644 index 000000000000..fd747ed57e58 --- /dev/null +++ b/provider/terraform/resources/resource_compute_backend_service.go @@ -0,0 +1,632 @@ +package google + +import ( + "crypto/sha256" + "errors" + "fmt" + "log" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" + computeBeta "google.golang.org/api/compute/v0.beta" +) + +func 
resourceComputeBackendService() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeBackendServiceCreate, + Read: resourceComputeBackendServiceRead, + Update: resourceComputeBackendServiceUpdate, + Delete: resourceComputeBackendServiceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCPName, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: selfLinkRelativePathHash, + Required: true, + MinItems: 1, + MaxItems: 1, + }, + + "iap": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oauth2_client_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "oauth2_client_secret": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Sensitive: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == fmt.Sprintf("%x", sha256.Sum256([]byte(new))) { + return true + } + return false + }, + }, + }, + }, + }, + + "backend": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Set: resourceGoogleComputeBackendServiceBackendHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + "balancing_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "UTILIZATION", + }, + "capacity_scaler": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Default: 1, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "max_rate": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "max_rate_per_instance": &schema.Schema{ + 
Type: schema.TypeFloat, + Optional: true, + }, + "max_connections": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "max_connections_per_instance": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "max_utilization": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Default: 0.8, + }, + }, + }, + }, + + "cdn_policy": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cache_key_policy": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "include_host": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "include_protocol": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "include_query_string": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "query_string_blacklist": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"cdn_policy.0.cache_key_policy.query_string_whitelist"}, + }, + "query_string_whitelist": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"cdn_policy.0.cache_key_policy.query_string_blacklist"}, + }, + }, + }, + }, + }, + }, + }, + + "custom_request_headers": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. 
See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "enable_cdn": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Removed: "region has been removed as it was never used. For internal load balancing, use google_compute_region_backend_service", + }, + + "security_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "connection_draining_timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 300, + }, + }, + } +} + +func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + service, err := expandBackendService(d) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Backend Service: %#v", service) + op, err := config.clientComputeBeta.BackendServices.Insert( + 
project, service).Do() + if err != nil { + return fmt.Errorf("Error creating backend service: %s", err) + } + + log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) + + // Store the ID now + d.SetId(service.Name) + + // Wait for the operation to complete + waitErr := computeSharedOperationWait(config.clientCompute, op, project, "Creating Backend Service") + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + if v, ok := d.GetOk("security_policy"); ok { + pol, err := ParseSecurityPolicyFieldValue(v.(string), d, config) + op, err := config.clientComputeBeta.BackendServices.SetSecurityPolicy( + project, service.Name, &computeBeta.SecurityPolicyReference{ + SecurityPolicy: pol.RelativeLink(), + }).Do() + if err != nil { + return errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) + } + waitErr := computeSharedOperationWait(config.clientCompute, op, project, "Adding Backend Service Security Policy") + if waitErr != nil { + return waitErr + } + } + + return resourceComputeBackendServiceRead(d, meta) +} + +func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + service, err := config.clientComputeBeta.BackendServices.Get(project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Backend Service %q", d.Get("name").(string))) + } + + d.Set("name", service.Name) + d.Set("description", service.Description) + d.Set("enable_cdn", service.EnableCDN) + d.Set("port_name", service.PortName) + d.Set("protocol", service.Protocol) + d.Set("session_affinity", service.SessionAffinity) + d.Set("timeout_sec", service.TimeoutSec) + d.Set("fingerprint", service.Fingerprint) + d.Set("self_link", ConvertSelfLinkToV1(service.SelfLink)) + d.Set("backend", flattenBackends(service.Backends)) + d.Set("connection_draining_timeout_sec", 
service.ConnectionDraining.DrainingTimeoutSec) + d.Set("iap", flattenIap(service.Iap)) + d.Set("project", project) + d.Set("health_checks", service.HealthChecks) + if err := d.Set("cdn_policy", flattenCdnPolicy(service.CdnPolicy)); err != nil { + return err + } + d.Set("security_policy", service.SecurityPolicy) + d.Set("custom_request_headers", service.CustomRequestHeaders) + + return nil +} + +func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + service, err := expandBackendService(d) + if err != nil { + return err + } + service.Fingerprint = d.Get("fingerprint").(string) + + project, err := getProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) + op, err := config.clientComputeBeta.BackendServices.Update( + project, d.Id(), service).Do() + if err != nil { + return fmt.Errorf("Error updating backend service: %s", err) + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating Backend Service") + if err != nil { + return err + } + + if d.HasChange("security_policy") { + pol, err := ParseSecurityPolicyFieldValue(d.Get("security_policy").(string), d, config) + if err != nil { + return err + } + op, err := config.clientComputeBeta.BackendServices.SetSecurityPolicy( + project, service.Name, &computeBeta.SecurityPolicyReference{ + SecurityPolicy: pol.RelativeLink(), + }).Do() + if err != nil { + return err + } + waitErr := computeSharedOperationWait(config.clientCompute, op, project, "Adding Backend Service Security Policy") + if waitErr != nil { + return waitErr + } + } + + return resourceComputeBackendServiceRead(d, meta) +} + +func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting backend service %s", d.Id()) + op, 
err := config.clientCompute.BackendServices.Delete(
		project, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting backend service: %s", err)
	}

	err = computeOperationWait(config.clientCompute, op, project, "Deleting Backend Service")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}

// expandIap converts the single-element iap config list to the API type; an
// absent block yields nil. Enabled and both client fields are force-sent so
// the API receives an explicit on state.
func expandIap(configured []interface{}) *computeBeta.BackendServiceIAP {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	cfg := configured[0].(map[string]interface{})
	return &computeBeta.BackendServiceIAP{
		Enabled:            true,
		Oauth2ClientId:     cfg["oauth2_client_id"].(string),
		Oauth2ClientSecret: cfg["oauth2_client_secret"].(string),
		ForceSendFields:    []string{"Enabled", "Oauth2ClientId", "Oauth2ClientSecret"},
	}
}

// flattenIap converts the API IAP config back to state form. The secret is
// represented by its SHA-256 (all the API returns); a disabled/nil config
// flattens to an empty list.
func flattenIap(iap *computeBeta.BackendServiceIAP) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, 1)
	if iap == nil || !iap.Enabled {
		return out
	}

	return append(out, map[string]interface{}{
		"oauth2_client_id":     iap.Oauth2ClientId,
		"oauth2_client_secret": iap.Oauth2ClientSecretSha256,
	})
}

// expandBackends converts the backend set into API Backend values. Zero-valued
// numeric limits are null-ed out so the API does not receive them; scaler and
// utilization are force-sent so explicit zeros survive.
func expandBackends(configured []interface{}) ([]*computeBeta.Backend, error) {
	backends := make([]*computeBeta.Backend, 0, len(configured))

	for _, raw := range configured {
		data := raw.(map[string]interface{})

		group, ok := data["group"]
		if !ok {
			return nil, errors.New("google_compute_backend_service.backend.group must be set")
		}

		backend := computeBeta.Backend{
			Group: group.(string),
		}

		if v, ok := data["balancing_mode"]; ok {
			backend.BalancingMode = v.(string)
		}
		if v, ok := data["capacity_scaler"]; ok {
			backend.CapacityScaler = v.(float64)
			backend.ForceSendFields = append(backend.ForceSendFields, "CapacityScaler")
		}
		if v, ok := data["description"]; ok {
			backend.Description = v.(string)
		}
		if v, ok := data["max_rate"]; ok {
			backend.MaxRate = int64(v.(int))
			if backend.MaxRate == 0 {
				backend.NullFields = append(backend.NullFields, "MaxRate")
			}
		}
		if v, ok := data["max_rate_per_instance"]; ok {
			backend.MaxRatePerInstance = v.(float64)
			if backend.MaxRatePerInstance == 0 {
				backend.NullFields = append(backend.NullFields, "MaxRatePerInstance")
			}
		}
		if v, ok := data["max_connections"]; ok {
			backend.MaxConnections = int64(v.(int))
			if backend.MaxConnections == 0 {
				backend.NullFields = append(backend.NullFields, "MaxConnections")
			}
		}
		if v, ok := data["max_connections_per_instance"]; ok {
			backend.MaxConnectionsPerInstance = int64(v.(int))
			if backend.MaxConnectionsPerInstance == 0 {
				backend.NullFields = append(backend.NullFields, "MaxConnectionsPerInstance")
			}
		}
		if v, ok := data["max_utilization"]; ok {
			backend.MaxUtilization = v.(float64)
			backend.ForceSendFields = append(backend.ForceSendFields, "MaxUtilization")
		}

		backends = append(backends, &backend)
	}

	return backends, nil
}

// flattenBackends converts API Backend values back into state maps.
func flattenBackends(backends []*computeBeta.Backend) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, len(backends))

	for _, b := range backends {
		result = append(result, map[string]interface{}{
			"balancing_mode":               b.BalancingMode,
			"capacity_scaler":              b.CapacityScaler,
			"description":                  b.Description,
			"group":                        b.Group,
			"max_rate":                     b.MaxRate,
			"max_rate_per_instance":        b.MaxRatePerInstance,
			"max_connections":              b.MaxConnections,
			"max_connections_per_instance": b.MaxConnectionsPerInstance,
			"max_utilization":              b.MaxUtilization,
		})
	}

	return result
}

// expandBackendService assembles the full API request body from resource
// data.
func expandBackendService(d *schema.ResourceData) (*computeBeta.BackendService, error) {
	rawChecks := d.Get("health_checks").(*schema.Set).List()
	healthChecks := make([]string, 0, len(rawChecks))
	for _, hc := range rawChecks {
		healthChecks = append(healthChecks, hc.(string))
	}

	// The IAP service is enabled and disabled by adding or removing
	// the IAP configuration block (and providing the client id
	// and secret). We are force sending the three required API fields
	// to enable/disable IAP at all times here, and relying on Golang's
	// type defaults to enable or disable IAP in the existence or absence
	// of the block, instead of checking if the block exists, zeroing out
	// fields, etc.
	service := &computeBeta.BackendService{
		Name:         d.Get("name").(string),
		HealthChecks: healthChecks,
		Iap: &computeBeta.BackendServiceIAP{
			ForceSendFields: []string{"Enabled", "Oauth2ClientId", "Oauth2ClientSecret"},
		},
		CdnPolicy: &computeBeta.BackendServiceCdnPolicy{
			CacheKeyPolicy: &computeBeta.CacheKeyPolicy{
				ForceSendFields: []string{"IncludeProtocol", "IncludeHost", "IncludeQueryString", "QueryStringWhitelist", "QueryStringBlacklist"},
			},
		},
		CustomRequestHeaders: convertStringSet(d.Get("custom_request_headers").(*schema.Set)),
	}

	if v, ok := d.GetOk("iap"); ok {
		service.Iap = expandIap(v.([]interface{}))
	}

	if v, ok := d.GetOk("backend"); ok {
		expanded, err := expandBackends(v.(*schema.Set).List())
		if err != nil {
			return nil, err
		}
		service.Backends = expanded
	}

	if v, ok := d.GetOk("description"); ok {
		service.Description = v.(string)
	}

	if v, ok := d.GetOk("port_name"); ok {
		service.PortName = v.(string)
	}

	if v, ok := d.GetOk("protocol"); ok {
		service.Protocol = v.(string)
	}

	if v, ok := d.GetOk("session_affinity"); ok {
		service.SessionAffinity = v.(string)
	}

	if v, ok := d.GetOk("timeout_sec"); ok {
		service.TimeoutSec = int64(v.(int))
	}

	if v, ok := d.GetOk("enable_cdn"); ok {
		service.EnableCDN = v.(bool)
	}

	service.ConnectionDraining = &computeBeta.ConnectionDraining{
		DrainingTimeoutSec: int64(d.Get("connection_draining_timeout_sec").(int)),
	}

	if v, ok := d.GetOk("cdn_policy"); ok {
		if expanded := expandCdnPolicy(v.([]interface{})); expanded != nil {
			service.CdnPolicy = expanded
		}
	}

	return service, nil
}

// expandCdnPolicy converts the cdn_policy block to its API form; absent
// blocks (or blocks without a cache_key_policy) yield nil.
func expandCdnPolicy(configured []interface{}) *computeBeta.BackendServiceCdnPolicy {
	if len(configured) == 0 || configured[0] == nil {
		return nil
	}

	data := configured[0].(map[string]interface{})
	ckp := data["cache_key_policy"].([]interface{})
	if len(ckp) == 0 {
		return nil
	}
	ckpData := ckp[0].(map[string]interface{})

	return &computeBeta.BackendServiceCdnPolicy{
		CacheKeyPolicy: &computeBeta.CacheKeyPolicy{
			IncludeHost:          ckpData["include_host"].(bool),
			IncludeProtocol:      ckpData["include_protocol"].(bool),
			IncludeQueryString:   ckpData["include_query_string"].(bool),
			QueryStringBlacklist: convertStringSet(ckpData["query_string_blacklist"].(*schema.Set)),
			QueryStringWhitelist: convertStringSet(ckpData["query_string_whitelist"].(*schema.Set)),
			ForceSendFields:      []string{"IncludeProtocol", "IncludeHost", "IncludeQueryString", "QueryStringWhitelist", "QueryStringBlacklist"},
		},
	}
}

// flattenCdnPolicy converts the API CDN policy back into state form; a nil
// policy (or nil cache key policy) flattens to an empty list.
func flattenCdnPolicy(pol *computeBeta.BackendServiceCdnPolicy) []map[string]interface{} {
	result := []map[string]interface{}{}
	if pol == nil || pol.CacheKeyPolicy == nil {
		return result
	}

	return append(result, map[string]interface{}{
		"cache_key_policy": []map[string]interface{}{
			{
				"include_host":           pol.CacheKeyPolicy.IncludeHost,
				"include_protocol":       pol.CacheKeyPolicy.IncludeProtocol,
				"include_query_string":   pol.CacheKeyPolicy.IncludeQueryString,
				"query_string_blacklist": schema.NewSet(schema.HashString, convertStringArrToInterface(pol.CacheKeyPolicy.QueryStringBlacklist)),
				"query_string_whitelist": schema.NewSet(schema.HashString, convertStringArrToInterface(pol.CacheKeyPolicy.QueryStringWhitelist)),
			},
		},
	})
}
diff --git a/provider/terraform/resources/resource_compute_backend_service_migrate.go b/provider/terraform/resources/resource_compute_backend_service_migrate.go new file mode 100644 index 000000000000..386c5cfd6bff --- /dev/null +++
b/provider/terraform/resources/resource_compute_backend_service_migrate.go @@ -0,0 +1,135 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "bytes" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeBackendServiceMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Backend Service State v0; migrating to v1") + is, err := migrateBackendServiceStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateBackendServiceStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + oldHashToValue := map[string]map[string]interface{}{} + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "backend.") || k == "backend.#" { + continue + } + + // Key is now of the form backend.%d.%s + kParts := strings.Split(k, ".") + + // Sanity check: two parts should be there and should be a number + badFormat := false + if len(kParts) != 3 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf("migration error: found backend key in unexpected format: %s", k) + } + + if oldHashToValue[kParts[1]] == nil { + oldHashToValue[kParts[1]] = map[string]interface{}{} + } + oldHashToValue[kParts[1]][kParts[2]] = v + } + + oldHashToNewHash := map[string]int{} + for k, v := range oldHashToValue { + oldHashToNewHash[k] = resourceGoogleComputeBackendServiceBackendHash(v) + } + + values := map[string]string{} + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "backend.") { + continue + } + + 
if k == "backend.#" { + continue + } + + // Key is now of the form backend.%d.%s + kParts := strings.Split(k, ".") + newKey := fmt.Sprintf("%s.%d.%s", kParts[0], oldHashToNewHash[kParts[1]], kParts[2]) + values[newKey] = v + delete(is.Attributes, k) + } + + for k, v := range values { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func resourceGoogleComputeBackendServiceBackendHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if group, err := getRelativePath(m["group"].(string)); err != nil { + log.Printf("[WARN] Error on retrieving relative path of instance group: %s", err) + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + } else { + buf.WriteString(fmt.Sprintf("%s-", group)) + } + + if v, ok := m["balancing_mode"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["capacity_scaler"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + if v, ok := m["description"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["max_rate"]; ok { + buf.WriteString(fmt.Sprintf("%d-", int64(v.(int)))) + } + if v, ok := m["max_rate_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + if v, ok := m["max_connections"]; ok { + buf.WriteString(fmt.Sprintf("%d-", int64(v.(int)))) + } + if v, ok := m["max_connections_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%d-", int64(v.(int)))) + } + if v, ok := m["max_rate_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + + return hashcode.String(buf.String()) +} diff --git a/provider/terraform/resources/resource_compute_firewall_migrate.go b/provider/terraform/resources/resource_compute_firewall_migrate.go new file mode 100644 index 000000000000..8509075f4756 --- /dev/null +++ b/provider/terraform/resources/resource_compute_firewall_migrate.go @@ -0,0 +1,93 @@ +package google 
+ +import ( + "fmt" + "log" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeFirewallMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Firewall State v0; migrating to v1") + is, err := migrateFirewallStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateFirewallStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + idx := 0 + portCount := 0 + newPorts := make(map[string]string) + keys := make([]string, len(is.Attributes)) + for k, _ := range is.Attributes { + keys[idx] = k + idx++ + + } + sort.Strings(keys) + for _, k := range keys { + if !strings.HasPrefix(k, "allow.") { + continue + } + + if k == "allow.#" { + continue + } + + if strings.HasSuffix(k, ".ports.#") { + continue + } + + if strings.HasSuffix(k, ".protocol") { + continue + } + + // We have a key that looks like "allow..ports.*" and we know it's not + // allow..ports.# because we deleted it above, so it must be allow..ports. + // from the Set of Ports. Just need to convert it to a list by + // replacing second hash with sequential numbers. 
+ kParts := strings.Split(k, ".") + + // Sanity check: all four parts should be there and should be a number + badFormat := false + if len(kParts) != 4 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found port key in unexpected format: %s", k) + } + allowHash, _ := strconv.Atoi(kParts[1]) + newK := fmt.Sprintf("allow.%d.ports.%d", allowHash, portCount) + portCount++ + newPorts[newK] = is.Attributes[k] + delete(is.Attributes, k) + } + + for k, v := range newPorts { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/provider/terraform/resources/resource_compute_global_forwarding_rule.go b/provider/terraform/resources/resource_compute_global_forwarding_rule.go new file mode 100644 index 000000000000..a775861d2484 --- /dev/null +++ b/provider/terraform/resources/resource_compute_global_forwarding_rule.go @@ -0,0 +1,279 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + + computeBeta "google.golang.org/api/compute/v0.beta" + compute "google.golang.org/api/compute/v1" +) + +func resourceComputeGlobalForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalForwardingRuleCreate, + Read: resourceComputeGlobalForwardingRuleRead, + Update: resourceComputeGlobalForwardingRuleUpdate, + Delete: resourceComputeGlobalForwardingRuleDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + 
Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "labels": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "label_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: portRangeDiffSuppress, + }, + + "ip_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"IPV4", "IPV6"}, false), + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Removed: "Please remove this attribute (it was never used)", + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + frule := &computeBeta.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + IpVersion: d.Get("ip_version").(string), + Description: d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + op, err := 
config.clientComputeBeta.GlobalForwardingRules.Insert(project, frule).Do() + if err != nil { + return fmt.Errorf("Error creating Global Forwarding Rule: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + err = computeSharedOperationWait(config.clientCompute, op, project, "Creating Global Fowarding Rule") + if err != nil { + return err + } + + // If we have labels to set, try to set those too + if _, ok := d.GetOk("labels"); ok { + labels := expandLabels(d) + // Do a read to get the fingerprint value so we can update + fingerprint, err := resourceComputeGlobalForwardingRuleReadLabelFingerprint(config, project, frule.Name) + if err != nil { + return err + } + + err = resourceComputeGlobalForwardingRuleSetLabels(config, project, frule.Name, labels, fingerprint) + if err != nil { + return err + } + } + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} + +func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + if d.HasChange("target") { + target := d.Get("target").(string) + targetRef := &compute.TargetReference{Target: target} + + op, err := config.clientCompute.GlobalForwardingRules.SetTarget( + project, d.Id(), targetRef).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating Global Forwarding Rule") + if err != nil { + return err + } + + d.SetPartial("target") + } + if d.HasChange("labels") { + labels := expandLabels(d) + fingerprint := d.Get("label_fingerprint").(string) + + err = resourceComputeGlobalForwardingRuleSetLabels(config, project, d.Get("name").(string), labels, fingerprint) + if err != nil { + return err + } + + d.SetPartial("labels") + } + + d.Partial(false) + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} + +func 
resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + frule, err := config.clientComputeBeta.GlobalForwardingRules.Get(project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Global Forwarding Rule %q", d.Get("name").(string))) + } + + d.Set("name", frule.Name) + d.Set("description", frule.Description) + d.Set("target", frule.Target) + d.Set("port_range", frule.PortRange) + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("ip_version", frule.IpVersion) + d.Set("self_link", ConvertSelfLinkToV1(frule.SelfLink)) + d.Set("labels", frule.Labels) + d.Set("label_fingerprint", frule.LabelFingerprint) + d.Set("project", project) + + return nil +} + +func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the GlobalForwardingRule + log.Printf("[DEBUG] GlobalForwardingRule delete request") + op, err := config.clientCompute.GlobalForwardingRules.Delete(project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err) + } + err = computeSharedOperationWait(config.clientCompute, op, project, "Deleting GlobalForwarding Rule") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +// resourceComputeGlobalForwardingRuleReadLabelFingerprint performs a read on the remote resource and returns only the +// fingerprint. Used on create when setting labels as we don't know the label fingerprint initially. 
+func resourceComputeGlobalForwardingRuleReadLabelFingerprint(config *Config, project, name string) (string, error) { + frule, err := config.clientComputeBeta.GlobalForwardingRules.Get(project, name).Do() + if err != nil { + return "", fmt.Errorf("Unable to read global forwarding rule to update labels: %s", err) + } + + return frule.LabelFingerprint, nil +} + +// resourceComputeGlobalForwardingRuleSetLabels sets the Labels attribute on a forwarding rule. +func resourceComputeGlobalForwardingRuleSetLabels(config *Config, project, name string, labels map[string]string, fingerprint string) error { + setLabels := computeBeta.GlobalSetLabelsRequest{ + Labels: labels, + LabelFingerprint: fingerprint, + } + op, err := config.clientComputeBeta.GlobalForwardingRules.SetLabels(project, name, &setLabels).Do() + if err != nil { + return err + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "Setting labels on Global Forwarding Rule") + if err != nil { + return err + } + + return nil +} diff --git a/provider/terraform/resources/resource_compute_image.go b/provider/terraform/resources/resource_compute_image.go new file mode 100644 index 000000000000..195bbf275641 --- /dev/null +++ b/provider/terraform/resources/resource_compute_image.go @@ -0,0 +1,307 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +const computeImageCreateTimeoutDefault = 4 + +func resourceComputeImage() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeImageCreate, + Read: resourceComputeImageRead, + Update: resourceComputeImageUpdate, + Delete: resourceComputeImageDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(computeImageCreateTimeoutDefault * time.Minute), + Update: schema.DefaultTimeout(computeImageCreateTimeoutDefault * time.Minute), + Delete: 
schema.DefaultTimeout(computeImageCreateTimeoutDefault * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + // TODO(cblecker): one of source_disk or raw_disk is required + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "family": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "source_disk": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "raw_disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "sha1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "container_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "TAR", + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "create_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Removed: "Use timeouts block instead. 
See https://www.terraform.io/docs/configuration/resources.html#timeouts.", + }, + + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "licenses": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Computed: true, + }, + + "label_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the image + image := &compute.Image{ + Name: d.Get("name").(string), + } + + if v, ok := d.GetOk("description"); ok { + image.Description = v.(string) + } + + if v, ok := d.GetOk("family"); ok { + image.Family = v.(string) + } + + // Load up the source_disk for this image if specified + if v, ok := d.GetOk("source_disk"); ok { + image.SourceDisk = v.(string) + } + + // Load up the raw_disk for this image if specified + if v, ok := d.GetOk("raw_disk"); ok { + rawDiskEle := v.([]interface{})[0].(map[string]interface{}) + imageRawDisk := &compute.ImageRawDisk{ + Source: rawDiskEle["source"].(string), + ContainerType: rawDiskEle["container_type"].(string), + } + if val, ok := rawDiskEle["sha1"]; ok { + imageRawDisk.Sha1Checksum = val.(string) + } + + image.RawDisk = imageRawDisk + } + + if _, ok := d.GetOk("labels"); ok { + image.Labels = expandLabels(d) + } + + // Load up the licenses for this image if specified + if _, ok := d.GetOk("licenses"); ok { + image.Licenses = licenses(d) + } + + // Read create timeout + createTimeout := int(d.Timeout(schema.TimeoutCreate).Minutes()) + + // Insert the image + op, err := config.clientCompute.Images.Insert( + project, image).Do() + if err != nil { + return fmt.Errorf("Error creating image: %s", err) + } + + // Store the ID + 
d.SetId(image.Name) + + err = computeOperationWaitTime(config.clientCompute, op, project, "Creating Image", createTimeout) + if err != nil { + return err + } + + return resourceComputeImageRead(d, meta) +} + +func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + image, err := config.clientCompute.Images.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Image %q", d.Get("name").(string))) + } + + if image.SourceDisk != "" { + d.Set("source_disk", image.SourceDisk) + } else if image.RawDisk != nil { + // `raw_disk.*.source` is only used at image creation but is not returned when calling Get. + // `raw_disk.*.sha1` is not supported, the value is simply discarded by the server. + // Leaving `raw_disk` to current state value. + } else { + return fmt.Errorf("Either raw_disk or source_disk configuration is required.") + } + + d.Set("name", image.Name) + d.Set("description", image.Description) + d.Set("family", image.Family) + d.Set("self_link", image.SelfLink) + d.Set("labels", image.Labels) + d.Set("licenses", image.Licenses) + d.Set("label_fingerprint", image.LabelFingerprint) + d.Set("project", project) + + return nil +} + +func resourceComputeImageUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Technically we are only updating one attribute, but setting d.Partial here makes it easier to add updates later + d.Partial(true) + + if d.HasChange("labels") { + labels := expandLabels(d) + labelFingerprint := d.Get("label_fingerprint").(string) + setLabelsRequest := compute.GlobalSetLabelsRequest{ + LabelFingerprint: labelFingerprint, + Labels: labels, + ForceSendFields: []string{"Labels"}, + } + + op, err := config.clientCompute.Images.SetLabels(project, d.Id(), 
&setLabelsRequest).Do() + if err != nil { + return err + } + + d.SetPartial("labels") + + err = computeOperationWaitTime(config.clientCompute, op, project, "Setting labels", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if err != nil { + return err + } + // Perform a read to see the new label_fingerprint value + image, err := config.clientCompute.Images.Get(project, d.Id()).Do() + if err != nil { + return err + } + d.Set("label_fingerprint", image.LabelFingerprint) + d.SetPartial("label_fingerprint") + } + + d.Partial(false) + return nil +} + +func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the image + log.Printf("[DEBUG] image delete request") + op, err := config.clientCompute.Images.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting image: %s", err) + } + + err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting image", int(d.Timeout(schema.TimeoutDelete).Minutes())) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func licenses(d *schema.ResourceData) []string { + licensesCount := d.Get("licenses.#").(int) + data := make([]string, licensesCount) + for i := 0; i < licensesCount; i++ { + data[i] = d.Get(fmt.Sprintf("licenses.%d", i)).(string) + } + return data +} diff --git a/provider/terraform/resources/resource_compute_instance.go b/provider/terraform/resources/resource_compute_instance.go new file mode 100644 index 000000000000..0719c777a7af --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance.go @@ -0,0 +1,1613 @@ +package google + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "log" + "strings" + + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/customdiff" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + 
"github.com/mitchellh/hashstructure" + computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceCreate, + Read: resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, + Delete: resourceComputeInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeInstanceImportState, + }, + + SchemaVersion: 6, + MigrateState: resourceComputeInstanceMigrateState, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(6 * time.Minute), + Update: schema.DefaultTimeout(6 * time.Minute), + Delete: schema.DefaultTimeout(6 * time.Minute), + }, + + // A compute instance is more or less a superset of a compute instance + // template. Please attempt to maintain consistency with the + // resource_compute_instance_template schema when updating this one. + Schema: map[string]*schema.Schema{ + "boot_disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "initialize_params": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + }, 
+ + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd"}, false), + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: diskImageDiffSuppress, + }, + }, + }, + }, + + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"boot_disk.initialize_params"}, + DiffSuppressFunc: linkDiffSuppress, + }, + }, + }, + }, + + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "subnetwork_project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Removed: "Please use network_ip", + }, + + "network_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, 
+ + "network_tier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), + }, + + // It's unclear why this field exists, as + // nat_ip can be both optional and computed. + // Consider deprecating it. + "assigned_nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Deprecated: "Use network_interface.access_config.nat_ip instead.", + }, + + "public_ptr_domain_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "alias_ip_range": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: ipCidrRangeDiffSuppress, + }, + "subnetwork_range_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + + "allow_stopping_for_update": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "attached_disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "READ_WRITE", + ValidateFunc: validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), + }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + 
"create_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Removed: "Use timeouts block instead.", + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "deletion_protection": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Removed: "Use boot_disk, scratch_disk, and attached_disk instead", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // TODO(mitchellh): one of image or disk is required + + "disk": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "scratch": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "guest_accelerator": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, + }, + }, + }, + }, + + "labels": &schema.Schema{ + 
Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "min_cpu_platform": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "scheduling": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + }, + }, + }, + + "scratch_disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "SCSI", + ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), + }, + }, + }, + }, + + "service_account": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + Set: stringScopeHashcode, + }, + }, + }, + }, + + "tags": &schema.Schema{ + 
Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "cpu_platform": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "label_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "tags_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + CustomizeDiff: customdiff.All( + customdiff.If( + func(d *schema.ResourceDiff, meta interface{}) bool { + return d.HasChange("guest_accelerator") + }, + suppressEmptyGuestAcceleratorDiff, + ), + ), + } +} + +func getInstance(config *Config, d *schema.ResourceData) (*computeBeta.Instance, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + zone, err := getZone(d, config) + if err != nil { + return nil, err + } + instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + if err != nil { + return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + } + return instance, nil +} + +func getDisk(diskUri string, d *schema.ResourceData, config *Config) (*compute.Disk, error) { + source, err := ParseDiskFieldValue(diskUri, d, config) + if err != nil { + return nil, err + } + + disk, err := config.clientCompute.Disks.Get(source.Project, source.Zone, source.Name).Do() + if err != nil { + return nil, err + } + + return disk, err +} + +func expandComputeInstance(project string, zone *compute.Zone, d *schema.ResourceData, config *Config) (*computeBeta.Instance, error) { + // Get the machine type + var machineTypeUrl string + 
if mt, ok := d.GetOk("machine_type"); ok { + log.Printf("[DEBUG] Loading machine type: %s", mt.(string)) + machineType, err := config.clientCompute.MachineTypes.Get( + project, zone.Name, mt.(string)).Do() + if err != nil { + return nil, fmt.Errorf( + "Error loading machine type: %s", + err) + } + machineTypeUrl = machineType.SelfLink + } + + // Build up the list of disks + + disks := []*computeBeta.AttachedDisk{} + if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { + bootDisk, err := expandBootDisk(d, config, zone, project) + if err != nil { + return nil, err + } + disks = append(disks, bootDisk) + } + + if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { + scratchDisks, err := expandScratchDisks(d, config, zone, project) + if err != nil { + return nil, err + } + disks = append(disks, scratchDisks...) + } + + attachedDisksCount := d.Get("attached_disk.#").(int) + + for i := 0; i < attachedDisksCount; i++ { + diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) + disk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return nil, err + } + + disks = append(disks, disk) + } + + prefix := "scheduling.0" + scheduling := &computeBeta.Scheduling{ + AutomaticRestart: googleapi.Bool(d.Get(prefix + ".automatic_restart").(bool)), + Preemptible: d.Get(prefix + ".preemptible").(bool), + OnHostMaintenance: d.Get(prefix + ".on_host_maintenance").(string), + ForceSendFields: []string{"AutomaticRestart", "Preemptible"}, + } + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return nil, fmt.Errorf("Error creating metadata: %s", err) + } + + networkInterfaces, err := expandNetworkInterfaces(d, config) + if err != nil { + return nil, fmt.Errorf("Error creating network interfaces: %s", err) + } + + accels, err := expandInstanceGuestAccelerators(d, config) + if err != nil { + return nil, fmt.Errorf("Error creating guest accelerators: %s", err) + } + + // Create the instance information + return 
&computeBeta.Instance{ + CanIpForward: d.Get("can_ip_forward").(bool), + Description: d.Get("description").(string), + Disks: disks, + MachineType: machineTypeUrl, + Metadata: metadata, + Name: d.Get("name").(string), + NetworkInterfaces: networkInterfaces, + Tags: resourceInstanceTags(d), + Labels: expandLabels(d), + ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), + GuestAccelerators: accels, + MinCpuPlatform: d.Get("min_cpu_platform").(string), + Scheduling: scheduling, + DeletionProtection: d.Get("deletion_protection").(bool), + ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, + }, nil +} + +func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get the zone + z, err := getZone(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Loading zone: %s", z) + zone, err := config.clientCompute.Zones.Get( + project, z).Do() + if err != nil { + return fmt.Errorf("Error loading zone '%s': %s", z, err) + } + + instance, err := expandComputeInstance(project, zone, d, config) + if err != nil { + return err + } + + // Read create timeout + createTimeout := int(d.Timeout(schema.TimeoutCreate).Minutes()) + + log.Printf("[INFO] Requesting instance creation") + op, err := config.clientComputeBeta.Instances.Insert(project, zone.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(instance.Name) + + // Wait for the operation to complete + waitErr := computeSharedOperationWaitTime(config.clientCompute, op, project, createTimeout, "instance to create") + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + return resourceComputeInstanceRead(d, meta) +} + +func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + instance, err := getInstance(config, d) + if err != nil || instance == nil { + return err + } + + md := flattenMetadataBeta(instance.Metadata) + existingMetadata := d.Get("metadata").(map[string]interface{}) + + // If the existing config specifies "metadata.startup-script" instead of "metadata_startup_script", + // we shouldn't move the remote metadata.startup-script to metadata_startup_script. Otherwise, + // we should. + if ss, ok := existingMetadata["startup-script"]; !ok || ss == "" { + d.Set("metadata_startup_script", md["startup-script"]) + // Note that here we delete startup-script from our metadata list. This is to prevent storing the startup-script + // as a value in the metadata since the config specifically tracks it under 'metadata_startup_script' + delete(md, "startup-script") + } else if _, ok := d.GetOk("metadata_startup_script"); ok { + delete(md, "startup-script") + } + + if err = d.Set("metadata", md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + + d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) + d.Set("can_ip_forward", instance.CanIpForward) + d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType)) + + // Set the networks + // Use the first external IP found for the default connection info. + networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) + if err != nil { + return err + } + if err := d.Set("network_interface", networkInterfaces); err != nil { + return err + } + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. 
+ sshIP := externalIP + if sshIP == "" { + sshIP = internalIP + } + + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": sshIP, + }) + + // Set the tags fingerprint if there is one. + if instance.Tags != nil { + d.Set("tags_fingerprint", instance.Tags.Fingerprint) + d.Set("tags", convertStringArrToInterface(instance.Tags.Items)) + } + + if err := d.Set("labels", instance.Labels); err != nil { + return err + } + + if instance.LabelFingerprint != "" { + d.Set("label_fingerprint", instance.LabelFingerprint) + } + + attachedDiskSources := make(map[string]int) + for i, v := range d.Get("attached_disk").([]interface{}) { + if v == nil { + // There was previously a bug in this code that, when triggered, + // would cause some nil values to end up in the list of attached disks. + // Check for this case to make sure we don't try to parse the nil disk. + continue + } + disk := v.(map[string]interface{}) + s := disk["source"].(string) + var sourceLink string + if strings.Contains(s, "regions/") { + source, err := ParseRegionDiskFieldValue(disk["source"].(string), d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } else { + source, err := ParseDiskFieldValue(disk["source"].(string), d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } + attachedDiskSources[sourceLink] = i + } + + attachedDisks := make([]map[string]interface{}, d.Get("attached_disk.#").(int)) + scratchDisks := []map[string]interface{}{} + for _, disk := range instance.Disks { + if disk.Boot { + d.Set("boot_disk", flattenBootDisk(d, disk, config)) + } else if disk.Type == "SCRATCH" { + scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) + } else { + var sourceLink string + if strings.Contains(disk.Source, "regions/") { + source, err := ParseRegionDiskFieldValue(disk.Source, d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } else { + source, err := 
ParseDiskFieldValue(disk.Source, d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } + adIndex, inConfig := attachedDiskSources[sourceLink] + di := map[string]interface{}{ + "source": ConvertSelfLinkToV1(disk.Source), + "device_name": disk.DeviceName, + "mode": disk.Mode, + } + if key := disk.DiskEncryptionKey; key != nil { + if inConfig { + di["disk_encryption_key_raw"] = d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)) + } + di["disk_encryption_key_sha256"] = key.Sha256 + } + // We want the disks to remain in the order we set in the config, so if a disk + // is present in the config, make sure it's at the correct index. Otherwise, append it. + if inConfig { + attachedDisks[adIndex] = di + } else { + attachedDisks = append(attachedDisks, di) + } + } + } + // Remove nils from map in case there were disks in the config that were not present on read; + // i.e. a disk was detached out of band + ads := []map[string]interface{}{} + for _, d := range attachedDisks { + if d != nil { + ads = append(ads, d) + } + } + + d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)) + d.Set("attached_disk", ads) + d.Set("scratch_disk", scratchDisks) + d.Set("scheduling", flattenScheduling(instance.Scheduling)) + d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators)) + d.Set("cpu_platform", instance.CpuPlatform) + d.Set("min_cpu_platform", instance.MinCpuPlatform) + d.Set("deletion_protection", instance.DeletionProtection) + d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)) + d.Set("instance_id", fmt.Sprintf("%d", instance.Id)) + d.Set("project", project) + d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)) + d.Set("name", instance.Name) + d.SetId(instance.Name) + + return nil +} + +func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + 
} + + zone, err := getZone(d, config) + if err != nil { + return err + } + + // Use beta api directly in order to read network_interface.fingerprint without having to put it in the schema. + // Change back to getInstance(config, d) once updating alias ips is GA. + instance, err := config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + } + + // Enable partial mode for the resource since it is possible + d.Partial(true) + + if d.HasChange("metadata") { + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return fmt.Errorf("Error parsing metadata: %s", err) + } + + metadataV1 := &compute.Metadata{} + if err := Convert(metadata, metadataV1); err != nil { + return err + } + + op, err := config.clientCompute.Instances.SetMetadata(project, zone, d.Id(), metadataV1).Do() + if err != nil { + return fmt.Errorf("Error updating metadata: %s", err) + } + + opErr := computeOperationWaitTime(config.clientCompute, op, project, "metadata to update", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + + d.SetPartial("metadata") + } + + if d.HasChange("tags") { + tags := resourceInstanceTags(d) + tagsV1 := &compute.Tags{} + if err := Convert(tags, tagsV1); err != nil { + return err + } + op, err := config.clientCompute.Instances.SetTags( + project, zone, d.Id(), tagsV1).Do() + if err != nil { + return fmt.Errorf("Error updating tags: %s", err) + } + + opErr := computeOperationWaitTime(config.clientCompute, op, project, "tags to update", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + + d.SetPartial("tags") + } + + if d.HasChange("labels") { + labels := expandLabels(d) + labelFingerprint := d.Get("label_fingerprint").(string) + req := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint} + + op, err := 
config.clientCompute.Instances.SetLabels(project, zone, d.Id(), &req).Do() + if err != nil { + return fmt.Errorf("Error updating labels: %s", err) + } + + opErr := computeOperationWaitTime(config.clientCompute, op, project, "labels to update", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + + d.SetPartial("labels") + } + + if d.HasChange("scheduling") { + prefix := "scheduling.0" + scheduling := &compute.Scheduling{ + AutomaticRestart: googleapi.Bool(d.Get(prefix + ".automatic_restart").(bool)), + Preemptible: d.Get(prefix + ".preemptible").(bool), + OnHostMaintenance: d.Get(prefix + ".on_host_maintenance").(string), + ForceSendFields: []string{"AutomaticRestart", "Preemptible"}, + } + + op, err := config.clientCompute.Instances.SetScheduling(project, + zone, d.Id(), scheduling).Do() + + if err != nil { + return fmt.Errorf("Error updating scheduling policy: %s", err) + } + + opErr := computeOperationWaitTime(config.clientCompute, op, project, "scheduling policy update", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + + d.SetPartial("scheduling") + } + + networkInterfacesCount := d.Get("network_interface.#").(int) + // Sanity check + if networkInterfacesCount != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + instNetworkInterface := instance.NetworkInterfaces[i] + networkName := d.Get(prefix + ".name").(string) + + // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) + networkName = instNetworkInterface.Name + // Sanity check + if networkName != instNetworkInterface.Name { + return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + if d.HasChange(prefix + ".access_config") { + + // TODO: This code 
deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. + + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range instNetworkInterface.AccessConfigs { + op, err := config.clientCompute.Instances.DeleteAccessConfig( + project, zone, d.Id(), ac.Name, networkName).Do() + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := computeOperationWaitTime(config.clientCompute, op, project, "old access_config to delete", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + } + + // Create new ones + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + ac := &computeBeta.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + NetworkTier: d.Get(acPrefix + ".network_tier").(string), + } + if ptr, ok := d.GetOk(acPrefix + ".public_ptr_domain_name"); ok && ptr != "" { + ac.SetPublicPtr = true + ac.PublicPtrDomainName = ptr.(string) + } + + op, err := config.clientComputeBeta.Instances.AddAccessConfig( + project, zone, d.Id(), networkName, ac).Do() + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutUpdate).Minutes()), "new access_config to add") + if opErr != nil { + return opErr + } + } + } + + if d.HasChange(prefix + ".alias_ip_range") { + rereadFingerprint := false + + // Alias IP ranges cannot be updated; they must be removed and then added. 
+			if len(instNetworkInterface.AliasIpRanges) > 0 {
+				ni := &computeBeta.NetworkInterface{
+					Fingerprint:     instNetworkInterface.Fingerprint,
+					ForceSendFields: []string{"AliasIpRanges"},
+				}
+				op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, d.Id(), networkName, ni).Do()
+				if err != nil {
+					return errwrap.Wrapf("Error removing alias_ip_range: {{err}}", err)
+				}
+				opErr := computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutUpdate).Minutes()), "updating alias ip ranges")
+				if opErr != nil {
+					return opErr
+				}
+				rereadFingerprint = true
+			}
+
+			ranges := d.Get(prefix + ".alias_ip_range").([]interface{})
+			if len(ranges) > 0 {
+				if rereadFingerprint {
+					instance, err = config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do()
+					if err != nil {
+						return err
+					}
+					instNetworkInterface = instance.NetworkInterfaces[i]
+				}
+				ni := &computeBeta.NetworkInterface{
+					AliasIpRanges: expandAliasIpRanges(ranges),
+					Fingerprint:   instNetworkInterface.Fingerprint,
+				}
+				op, err := config.clientComputeBeta.Instances.UpdateNetworkInterface(project, zone, d.Id(), networkName, ni).Do()
+				if err != nil {
+					return errwrap.Wrapf("Error adding alias_ip_range: {{err}}", err)
+				}
+				opErr := computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutUpdate).Minutes()), "updating alias ip ranges")
+				if opErr != nil {
+					return opErr
+				}
+			}
+		}
+		d.SetPartial("network_interface")
+	}
+
+	if d.HasChange("attached_disk") {
+		o, n := d.GetChange("attached_disk")
+
+		// Keep track of disks currently in the instance. Because the google_compute_disk resource
+		// can detach disks, it's possible that there are fewer disks currently attached than there
+		// were at the time we ran terraform plan.
+ currDisks := map[string]struct{}{} + for _, disk := range instance.Disks { + if !disk.Boot && disk.Type != "SCRATCH" { + currDisks[disk.DeviceName] = struct{}{} + } + } + + // Keep track of disks currently in state. + // Since changing any field within the disk needs to detach+reattach it, + // keep track of the hash of the full disk. + oDisks := map[uint64]string{} + for _, disk := range o.([]interface{}) { + diskConfig := disk.(map[string]interface{}) + computeDisk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return err + } + hash, err := hashstructure.Hash(*computeDisk, nil) + if err != nil { + return err + } + if _, ok := currDisks[computeDisk.DeviceName]; ok { + oDisks[hash] = computeDisk.DeviceName + } + } + + // Keep track of new config's disks. + // Since changing any field within the disk needs to detach+reattach it, + // keep track of the hash of the full disk. + // If a disk with a certain hash is only in the new config, it should be attached. + nDisks := map[uint64]struct{}{} + var attach []*compute.AttachedDisk + for _, disk := range n.([]interface{}) { + diskConfig := disk.(map[string]interface{}) + computeDisk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return err + } + hash, err := hashstructure.Hash(*computeDisk, nil) + if err != nil { + return err + } + nDisks[hash] = struct{}{} + + if _, ok := oDisks[hash]; !ok { + computeDiskV1 := &compute.AttachedDisk{} + err = Convert(computeDisk, computeDiskV1) + if err != nil { + return err + } + attach = append(attach, computeDiskV1) + } + } + + // If a source is only in the old config, it should be detached. + // Detach the old disks. 
+		for hash, deviceName := range oDisks {
+			if _, ok := nDisks[hash]; !ok {
+				op, err := config.clientCompute.Instances.DetachDisk(project, zone, instance.Name, deviceName).Do()
+				if err != nil {
+					return errwrap.Wrapf("Error detaching disk: {{err}}", err)
+				}
+
+				opErr := computeOperationWaitTime(config.clientCompute, op, project, "detaching disk", int(d.Timeout(schema.TimeoutUpdate).Minutes()))
+				if opErr != nil {
+					return opErr
+				}
+				log.Printf("[DEBUG] Successfully detached disk %s", deviceName)
+			}
+		}
+
+		// Attach the new disks
+		for _, disk := range attach {
+			op, err := config.clientCompute.Instances.AttachDisk(project, zone, instance.Name, disk).Do()
+			if err != nil {
+				return errwrap.Wrapf("Error attaching disk : {{err}}", err)
+			}
+
+			opErr := computeOperationWaitTime(config.clientCompute, op, project, "attaching disk", int(d.Timeout(schema.TimeoutUpdate).Minutes()))
+			if opErr != nil {
+				return opErr
+			}
+			log.Printf("[DEBUG] Successfully attached disk %s", disk.Source)
+		}
+
+		d.SetPartial("attached_disk")
+	}
+
+	// d.HasChange("service_account") is oversensitive: see https://github.com/hashicorp/terraform/issues/17411
+	// Until that's fixed, manually check whether there is a change.
+ o, n := d.GetChange("service_account") + oList := o.([]interface{}) + nList := n.([]interface{}) + scopesChange := false + if len(oList) != len(nList) { + scopesChange = true + } else if len(oList) == 1 { + // service_account has MaxItems: 1 + // scopes is a required field and so will always be set + oScopes := oList[0].(map[string]interface{})["scopes"].(*schema.Set) + nScopes := nList[0].(map[string]interface{})["scopes"].(*schema.Set) + scopesChange = !oScopes.Equal(nScopes) + } + + if d.HasChange("deletion_protection") { + nDeletionProtection := d.Get("deletion_protection").(bool) + + op, err := config.clientCompute.Instances.SetDeletionProtection(project, zone, d.Id()).DeletionProtection(nDeletionProtection).Do() + if err != nil { + return fmt.Errorf("Error updating deletion protection flag: %s", err) + } + + opErr := computeOperationWaitTime(config.clientCompute, op, project, "deletion protection to update", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + + d.SetPartial("deletion_protection") + } + + // Attributes which can only be changed if the instance is stopped + if scopesChange || d.HasChange("service_account.0.email") || d.HasChange("machine_type") || d.HasChange("min_cpu_platform") { + if !d.Get("allow_stopping_for_update").(bool) { + return fmt.Errorf("Changing the machine_type, min_cpu_platform, or service_account on an instance requires stopping it. 
" + + "To acknowledge this, please set allow_stopping_for_update = true in your config.") + } + op, err := config.clientCompute.Instances.Stop(project, zone, instance.Name).Do() + if err != nil { + return errwrap.Wrapf("Error stopping instance: {{err}}", err) + } + + opErr := computeOperationWaitTime(config.clientCompute, op, project, "stopping instance", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + + if d.HasChange("machine_type") { + mt, err := ParseMachineTypesFieldValue(d.Get("machine_type").(string), d, config) + if err != nil { + return err + } + req := &compute.InstancesSetMachineTypeRequest{ + MachineType: mt.RelativeLink(), + } + op, err = config.clientCompute.Instances.SetMachineType(project, zone, instance.Name, req).Do() + if err != nil { + return err + } + opErr := computeOperationWaitTime(config.clientCompute, op, project, "updating machinetype", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + d.SetPartial("machine_type") + } + + if d.HasChange("min_cpu_platform") { + minCpuPlatform, ok := d.GetOk("min_cpu_platform") + // Even though you don't have to set minCpuPlatform on create, you do have to set it to an + // actual value on update. "Automatic" is the default. This will be read back from the API as empty, + // so we don't need to worry about diffs. 
+ if !ok { + minCpuPlatform = "Automatic" + } + req := &compute.InstancesSetMinCpuPlatformRequest{ + MinCpuPlatform: minCpuPlatform.(string), + } + op, err = config.clientCompute.Instances.SetMinCpuPlatform(project, zone, instance.Name, req).Do() + if err != nil { + return err + } + opErr := computeOperationWaitTime(config.clientCompute, op, project, "updating min cpu platform", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + d.SetPartial("min_cpu_platform") + } + + if d.HasChange("service_account.0.email") || scopesChange { + sa := d.Get("service_account").([]interface{}) + req := &compute.InstancesSetServiceAccountRequest{ForceSendFields: []string{"email"}} + if len(sa) > 0 && sa[0] != nil { + saMap := sa[0].(map[string]interface{}) + req.Email = saMap["email"].(string) + req.Scopes = canonicalizeServiceScopes(convertStringSet(saMap["scopes"].(*schema.Set))) + } + op, err = config.clientCompute.Instances.SetServiceAccount(project, zone, instance.Name, req).Do() + if err != nil { + return err + } + opErr := computeOperationWaitTime(config.clientCompute, op, project, "updating service account", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + d.SetPartial("service_account") + } + + op, err = config.clientCompute.Instances.Start(project, zone, instance.Name).Do() + if err != nil { + return errwrap.Wrapf("Error starting instance: {{err}}", err) + } + + opErr = computeOperationWaitTime(config.clientCompute, op, project, "starting instance", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if opErr != nil { + return opErr + } + } + + // We made it, disable partial mode + d.Partial(false) + + return resourceComputeInstanceRead(d, meta) +} + +func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceData, meta interface{}) (*computeBeta.AttachedDisk, error) { + config := meta.(*Config) + + s := diskConfig["source"].(string) + var sourceLink string + if strings.Contains(s, 
"regions/") { + source, err := ParseRegionDiskFieldValue(s, d, config) + if err != nil { + return nil, err + } + sourceLink = source.RelativeLink() + } else { + source, err := ParseDiskFieldValue(s, d, config) + if err != nil { + return nil, err + } + sourceLink = source.RelativeLink() + } + + disk := &computeBeta.AttachedDisk{ + Source: sourceLink, + } + + if v, ok := diskConfig["mode"]; ok { + disk.Mode = v.(string) + } + + if v, ok := diskConfig["device_name"]; ok { + disk.DeviceName = v.(string) + } + + if v, ok := diskConfig["disk_encryption_key_raw"]; ok { + disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ + RawKey: v.(string), + } + } + return disk, nil +} + +// See comment on expandInstanceTemplateGuestAccelerators regarding why this +// code is duplicated. +func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*computeBeta.AcceleratorConfig, error) { + configs, ok := d.GetOk("guest_accelerator") + if !ok { + return nil, nil + } + accels := configs.([]interface{}) + guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels)) + for _, raw := range accels { + data := raw.(map[string]interface{}) + if data["count"].(int) == 0 { + continue + } + at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config) + if err != nil { + return nil, fmt.Errorf("cannot parse accelerator type: %v", err) + } + guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{ + AcceleratorCount: int64(data["count"].(int)), + AcceleratorType: at.RelativeLink(), + }) + } + + return guestAccelerators, nil +} + +// suppressEmptyGuestAcceleratorDiff is used to work around perpetual diff +// issues when a count of `0` guest accelerators is desired. This may occur when +// guest_accelerator support is controlled via a module variable. E.g.: +// +// guest_accelerators { +// count = "${var.enable_gpu ? var.gpu_count : 0}" +// ... 
+// } +// After reconciling the desired and actual state, we would otherwise see a +// perpetual resembling: +// [] != [{"count":0, "type": "nvidia-tesla-k80"}] +func suppressEmptyGuestAcceleratorDiff(d *schema.ResourceDiff, meta interface{}) error { + oldi, newi := d.GetChange("guest_accelerator") + + old, ok := oldi.([]interface{}) + if !ok { + return fmt.Errorf("Expected old guest accelerator diff to be a slice") + } + + new, ok := newi.([]interface{}) + if !ok { + return fmt.Errorf("Expected new guest accelerator diff to be a slice") + } + + if len(old) != 0 && len(new) != 1 { + return nil + } + + firstAccel, ok := new[0].(map[string]interface{}) + if !ok { + return fmt.Errorf("Unable to type assert guest accelerator") + } + + if firstAccel["count"].(int) == 0 { + if err := d.Clear("guest_accelerator"); err != nil { + return err + } + } + + return nil +} + +func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) + + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. 
Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Id()) + } else { + op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance: %s", err) + } + + // Wait for the operation to complete + opErr := computeOperationWaitTime(config.clientCompute, op, project, "instance to delete", int(d.Timeout(schema.TimeoutDelete).Minutes())) + if opErr != nil { + return opErr + } + + d.SetId("") + return nil + } +} + +func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid import id %q. Expecting {project}/{zone}/{instance_name}", d.Id()) + } + + d.Set("project", parts[0]) + d.Set("zone", parts[1]) + d.SetId(parts[2]) + + return []*schema.ResourceData{d}, nil +} + +func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) (*computeBeta.AttachedDisk, error) { + disk := &computeBeta.AttachedDisk{ + AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), + Boot: true, + } + + if v, ok := d.GetOk("boot_disk.0.device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { + disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{ + RawKey: v.(string), + } + } + + if v, ok := d.GetOk("boot_disk.0.source"); ok { + source, err := ParseDiskFieldValue(v.(string), d, config) + if err != nil { + return nil, err + } + disk.Source = source.RelativeLink() + } + + if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { + disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{} + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { + disk.InitializeParams.DiskSizeGb = int64(v.(int)) + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { + diskTypeName := 
v.(string) + diskType, err := readDiskType(config, zone, project, diskTypeName) + if err != nil { + return nil, fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) + } + disk.InitializeParams.DiskType = diskType.SelfLink + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok { + imageName := v.(string) + imageUrl, err := resolveImage(config, project, imageName) + if err != nil { + return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) + } + + disk.InitializeParams.SourceImage = imageUrl + } + } + + return disk, nil +} + +func flattenBootDisk(d *schema.ResourceData, disk *computeBeta.AttachedDisk, config *Config) []map[string]interface{} { + result := map[string]interface{}{ + "auto_delete": disk.AutoDelete, + "device_name": disk.DeviceName, + "source": ConvertSelfLinkToV1(disk.Source), + // disk_encryption_key_raw is not returned from the API, so copy it from what the user + // originally specified to avoid diffs. + "disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), + } + + diskDetails, err := getDisk(disk.Source, d, config) + if err != nil { + log.Printf("[WARN] Cannot retrieve boot disk details: %s", err) + + if _, ok := d.GetOk("boot_disk.0.initialize_params.#"); ok { + // If we can't read the disk details due to permission for instance, + // copy the initialize_params from what the user originally specified to avoid diffs. + m := d.Get("boot_disk.0.initialize_params") + result["initialize_params"] = m + } + } else { + result["initialize_params"] = []map[string]interface{}{{ + "type": GetResourceNameFromSelfLink(diskDetails.Type), + // If the config specifies a family name that doesn't match the image name, then + // the diff won't be properly suppressed. See DiffSuppressFunc for this field. 
+ "image": diskDetails.SourceImage, + "size": diskDetails.SizeGb, + }} + } + + if disk.DiskEncryptionKey != nil { + result["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + + return []map[string]interface{}{result} +} + +func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) ([]*computeBeta.AttachedDisk, error) { + diskType, err := readDiskType(config, zone, project, "local-ssd") + if err != nil { + return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) + } + + n := d.Get("scratch_disk.#").(int) + scratchDisks := make([]*computeBeta.AttachedDisk, 0, n) + for i := 0; i < n; i++ { + scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{ + AutoDelete: true, + Type: "SCRATCH", + Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string), + InitializeParams: &computeBeta.AttachedDiskInitializeParams{ + DiskType: diskType.SelfLink, + }, + }) + } + + return scratchDisks, nil +} + +func flattenScratchDisk(disk *computeBeta.AttachedDisk) map[string]interface{} { + result := map[string]interface{}{ + "interface": disk.Interface, + } + return result +} + +func hash256(raw string) (string, error) { + decoded, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return "", err + } + h := sha256.Sum256(decoded) + return base64.StdEncoding.EncodeToString(h[:]), nil +} diff --git a/provider/terraform/resources/resource_compute_instance_from_template.go b/provider/terraform/resources/resource_compute_instance_from_template.go new file mode 100644 index 000000000000..6ca9f0fa77e4 --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_from_template.go @@ -0,0 +1,142 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + strcase "github.com/stoewer/go-strcase" +) + +func resourceComputeInstanceFromTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceFromTemplateCreate, + Read: 
resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, + Delete: resourceComputeInstanceDelete, + + // Import doesn't really make sense, because you could just import + // as a google_compute_instance. + + Timeouts: resourceComputeInstance().Timeouts, + + Schema: computeInstanceFromTemplateSchema(), + CustomizeDiff: resourceComputeInstance().CustomizeDiff, + } +} + +func computeInstanceFromTemplateSchema() map[string]*schema.Schema { + s := resourceComputeInstance().Schema + + for _, field := range []string{"boot_disk", "machine_type", "network_interface"} { + // The user can set these fields as an override, but doesn't need to - + // the template values will be used if they're unset. + s[field].Required = false + s[field].Optional = true + } + + // Remove deprecated/removed fields that are never d.Set. We can't + // programatically remove all of them, because some of them still have d.Set + // calls. + for _, field := range []string{"create_timeout", "disk", "network"} { + delete(s, field) + } + + recurseOnSchema(s, func(field *schema.Schema) { + // We don't want to accidentally use default values to override the instance + // template, so remove defaults. + field.Default = nil + + // Make non-required fields computed since they'll be set by the template. + // Leave deprecated and removed fields alone because we don't set them. 
+ if !field.Required && !(field.Deprecated != "" || field.Removed != "") { + field.Computed = true + } + }) + + s["source_instance_template"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + + return s +} + +func recurseOnSchema(s map[string]*schema.Schema, f func(*schema.Schema)) { + for _, field := range s { + f(field) + if e := field.Elem; e != nil { + if r, ok := e.(*schema.Resource); ok { + recurseOnSchema(r.Schema, f) + } + } + } +} + +func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get the zone + z, err := getZone(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Loading zone: %s", z) + zone, err := config.clientCompute.Zones.Get(project, z).Do() + if err != nil { + return fmt.Errorf("Error loading zone '%s': %s", z, err) + } + + instance, err := expandComputeInstance(project, zone, d, config) + if err != nil { + return err + } + + // Force send all top-level fields in case they're overridden to zero values. + // TODO: consider doing so for nested fields as well. + for f, s := range computeInstanceFromTemplateSchema() { + // It seems that GetOkExists always returns true for sets. + // TODO: confirm this and file issue against Terraform core. + // In the meantime, don't force send sets. + if s.Type == schema.TypeSet { + continue + } + + if _, exists := d.GetOkExists(f); exists { + // Assume for now that all fields are exact snake_case versions of the API fields. + // This won't necessarily always be true, but it serves as a good approximation and + // can be adjusted later as we discover issues. 
+ instance.ForceSendFields = append(instance.ForceSendFields, strcase.UpperCamelCase(f)) + } + } + + tpl, err := ParseInstanceTemplateFieldValue(d.Get("source_instance_template").(string), d, config) + if err != nil { + return err + } + + log.Printf("[INFO] Requesting instance creation") + op, err := config.clientComputeBeta.Instances.Insert(project, zone.Name, instance).SourceInstanceTemplate(tpl.RelativeLink()).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(instance.Name) + + // Wait for the operation to complete + waitErr := computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), "instance to create") + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + return resourceComputeInstanceRead(d, meta) +} diff --git a/provider/terraform/resources/resource_compute_instance_group.go b/provider/terraform/resources/resource_compute_instance_group.go new file mode 100644 index 000000000000..6967dd00fe23 --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_group.go @@ -0,0 +1,499 @@ +package google + +import ( + "fmt" + "log" + "reflect" + "sort" + "strings" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupCreate, + Read: resourceComputeInstanceGroupRead, + Update: resourceComputeInstanceGroupUpdate, + Delete: resourceComputeInstanceGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeInstanceGroupImportState, + }, + + SchemaVersion: 2, + MigrateState: resourceComputeInstanceGroupMigrateState, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": { + Type: schema.TypeString, + 
Optional: true, + Computed: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "instances": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "named_port": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "size": { + Type: schema.TypeInt, + Computed: true, + }, + }, + + CustomizeDiff: customDiffInstanceGroupInstancesField, + } +} + +func customDiffInstanceGroupInstancesField(diff *schema.ResourceDiff, meta interface{}) error { + // This deals with an interesting problem that deserves some attention. + // When an instance is destroyed and recreated, its membership in + // instance groups disappears. However, its recreated `self_link` will + // be the same as the `self_link` from before the destroy/recreate. + // Therefore, if some instances which are set in the `instances` field + // are destroyed and recreated, although in *reality* there is a diff + // between the GCP state and the desired state, Terraform cannot *see* + // the diff without running a full Read cycle. There's no Read implicit + // in the `Apply` stage, so we need to trick Terraform into calling + // Update when things like this happen. + + // This function will be called in 3 different states. + // 1) it will be called on a new resource which hasn't been created yet. + // We shouldn't do anything interesting in that case. 
+ // 2) it will be called on an updated resource during "plan" time - that is, + // before anything has actually been done. In that case, we need to show + // a diff on the resource if there's a chance that any of the instances + // will be destroyed and recreated. Fortunately, in this case, the + // upstream logic will show that there is a diff. This will be a + // "Computed" diff - as if we had called diff.SetComputed("instances"), + // and that's a good response. + // 3) it will be called on an updated resource at "apply" time - that is, + // right when we're about to do something with this resource. That + // is designed to check whether there really is a diff on the Computed + // field "instances". Here, we have to get tricky. We need to show + // a diff, and it can't be a Computed diff (`apply` skips `Update` if + // the only diffs are Computed). It also can't be a ForceNew diff, + // because Terraform crashes if there's a ForceNew diff at apply time + // after not seeing one at plan time. We're in a pickle - the Terraform + // state matches our desired state, but is *wrong*. We add a fake item + // to the "instances" set, so that Terraform sees a diff between the + // state and the desired state. + + oldI, newI := diff.GetChange("instances") + oldInstanceSet := oldI.(*schema.Set) + newInstanceSet := newI.(*schema.Set) + oldInstances := convertStringArr(oldInstanceSet.List()) + newInstances := convertStringArr(newInstanceSet.List()) + + log.Printf("[DEBUG] InstanceGroup CustomizeDiff old: %#v, new: %#v", oldInstances, newInstances) + var memberUrls []string + config := meta.(*Config) + // We can't use getProject() or getZone(), because we only have a schema.ResourceDiff, + // not a schema.ResourceData. We'll have to emulate them like this. 
+ project := diff.Get("project").(string) + if project == "" { + project = config.Project + } + zone := diff.Get("zone").(string) + if zone == "" { + project = config.Zone + } + + // We need to see what instances are present in the instance group. There are a few + // possible results. + // 1) The instance group doesn't exist. We don't change the diff in this case - + // if the instance is being created, that's the right thing to do. + // 2) The instance group exists, and the GCP state matches the terraform state. In this + // case, we should do nothing. + // 3) The instance group exists, and the GCP state does not match the terraform state. + // In this case, we add the string "FORCE_UPDATE" to the list of instances, to convince + // Terraform to execute an update even though there's no diff between the terraform + // state and the desired state. + members, err := config.clientCompute.InstanceGroups.ListInstances( + project, zone, diff.Get("name").(string), &compute.InstanceGroupsListInstancesRequest{ + InstanceState: "ALL", + }).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // This is where we'll end up at either plan time or apply time on first creation. + return nil + } else { + // Any other errors return them + return fmt.Errorf("Error reading InstanceGroup Members: %s", err) + } + } + for _, member := range members.Items { + memberUrls = append(memberUrls, member.Instance) + } + sort.Strings(memberUrls) + sort.Strings(oldInstances) + sort.Strings(newInstances) + log.Printf("[DEBUG] InstanceGroup members: %#v. OldInstances: %#v", memberUrls, oldInstances) + if !reflect.DeepEqual(memberUrls, oldInstances) && reflect.DeepEqual(newInstances, oldInstances) { + // This is where we'll end up at apply-time only if an instance is + // somehow removed from the set of instances between refresh and update. 
+ newInstancesList := append(newInstances, "FORCE_UPDATE") + diff.SetNew("instances", newInstancesList) + } + // This is where we'll end up if the GCP state matches the Terraform state. + return nil +} + +func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) { + for _, v := range instanceUrls { + refs = append(refs, &compute.InstanceReference{ + Instance: v, + }) + } + return refs +} + +func validInstanceURLs(instanceUrls []string) bool { + for _, v := range instanceUrls { + if !strings.HasPrefix(v, "https://www.googleapis.com/compute/v1/") { + return false + } + } + return true +} + +func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + // Build the parameter + instanceGroup := &compute.InstanceGroup{ + Name: name, + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + instanceGroup.Description = v.(string) + } + + if v, ok := d.GetOk("named_port"); ok { + instanceGroup.NamedPorts = getNamedPorts(v.([]interface{})) + } + + if v, ok := d.GetOk("network"); ok { + instanceGroup.Network = v.(string) + } + + log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) + op, err := config.clientCompute.InstanceGroups.Insert( + project, zone, instanceGroup).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroup: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(fmt.Sprintf("%s/%s", zone, name)) + + // Wait for the operation to complete + err = computeOperationWait(config.clientCompute, op, project, "Creating InstanceGroup") + if err != nil { + d.SetId("") + return err + } + + if v, ok := d.GetOk("instances"); ok { + instanceUrls := convertStringArr(v.(*schema.Set).List()) + if !validInstanceURLs(instanceUrls) { + return 
fmt.Errorf("Error invalid instance URLs: %v", instanceUrls) + } + + addInstanceReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(instanceUrls), + } + + log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq) + op, err := config.clientCompute.InstanceGroups.AddInstances( + project, zone, name, addInstanceReq).Do() + if err != nil { + return fmt.Errorf("Error adding instances to InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWait(config.clientCompute, op, project, "Adding instances to InstanceGroup") + if err != nil { + return err + } + } + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + // retrieve instance group + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + project, zone, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", name)) + } + + // retrieve instance group members + var memberUrls []string + members, err := config.clientCompute.InstanceGroups.ListInstances( + project, zone, name, &compute.InstanceGroupsListInstancesRequest{ + InstanceState: "ALL", + }).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't have any instances + d.Set("instances", nil) + } else { + // any other errors return them + return fmt.Errorf("Error reading InstanceGroup Members: %s", err) + } + } else { + for _, member := range members.Items { + memberUrls = append(memberUrls, member.Instance) + } + log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls) + d.Set("instances", memberUrls) + } + + d.Set("named_port", 
flattenNamedPorts(instanceGroup.NamedPorts)) + d.Set("description", instanceGroup.Description) + + // Set computed fields + d.Set("network", instanceGroup.Network) + d.Set("size", instanceGroup.Size) + d.Set("project", project) + d.Set("zone", zone) + d.Set("self_link", instanceGroup.SelfLink) + + return nil +} +func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + err := resourceComputeInstanceGroupRead(d, meta) + if err != nil { + return err + } + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + d.Partial(true) + + if d.HasChange("instances") { + // to-do check for no instances + _, to_ := d.GetChange("instances") + // We need to get the current state from d directly because + // it is likely to have been changed by the Read() above. + from_ := d.Get("instances") + to_.(*schema.Set).Remove("FORCE_UPDATE") + + from := convertStringArr(from_.(*schema.Set).List()) + to := convertStringArr(to_.(*schema.Set).List()) + + if !validInstanceURLs(from) { + return fmt.Errorf("Error invalid instance URLs: %v", from) + } + if !validInstanceURLs(to) { + return fmt.Errorf("Error invalid instance URLs: %v", to) + } + + add, remove := calcAddRemove(from, to) + + if len(remove) > 0 { + removeReq := &compute.InstanceGroupsRemoveInstancesRequest{ + Instances: getInstanceReferences(remove), + } + + log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq) + removeOp, err := config.clientCompute.InstanceGroups.RemoveInstances( + project, zone, name, removeReq).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Instances already removed from InstanceGroup: %s", remove) + } else { + return fmt.Errorf("Error removing instances from InstanceGroup: %s", err) + } + } else { + // Wait for the operation to complete + err 
= computeOperationWait(config.clientCompute, removeOp, project, "Updating InstanceGroup") + if err != nil { + return err + } + } + } + + if len(add) > 0 { + + addReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(add), + } + + log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) + addOp, err := config.clientCompute.InstanceGroups.AddInstances( + project, zone, name, addReq).Do() + if err != nil { + return fmt.Errorf("Error adding instances from InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWait(config.clientCompute, addOp, project, "Updating InstanceGroup") + if err != nil { + return err + } + } + + d.SetPartial("instances") + } + + if d.HasChange("named_port") { + // Important to fetch via GetChange, because the above Read() will + // have reset the value retrieved via Get() to its current value. + _, namedPorts_ := d.GetChange("named_port") + namedPorts := getNamedPorts(namedPorts_.([]interface{})) + + namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) + op, err := config.clientCompute.InstanceGroups.SetNamedPorts( + project, zone, name, namedPortsReq).Do() + if err != nil { + return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Updating InstanceGroup") + if err != nil { + return err + } + d.SetPartial("named_port") + } + + d.Partial(false) + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + op, err := 
config.clientCompute.InstanceGroups.Delete(project, zone, name).Do() + if err != nil { + return fmt.Errorf("Error deleting InstanceGroup: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Deleting InstanceGroup") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceComputeInstanceGroupImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid compute instance group specifier. Expecting {zone}/{name}") + } + + d.Set("zone", parts[0]) + d.Set("name", parts[1]) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_compute_instance_group_manager.go b/provider/terraform/resources/resource_compute_instance_group_manager.go new file mode 100644 index 000000000000..b7b71179befc --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_group_manager.go @@ -0,0 +1,842 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + + computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" +) + +func resourceComputeInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupManagerCreate, + Read: resourceComputeInstanceGroupManagerRead, + Update: resourceComputeInstanceGroupManagerUpdate, + Delete: resourceComputeInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{ + State: resourceInstanceGroupManagerStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "base_instance_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: 
compareSelfLinkRelativePaths, + }, + + "version": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + + "target_size": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + + "percent": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + }, + }, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "named_port": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, 
+ + "update_strategy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "REPLACE", + ValidateFunc: validation.StringInSlice([]string{"RESTART", "NONE", "ROLLING_UPDATE", "REPLACE"}, false), + DiffSuppressFunc: func(key, old, new string, d *schema.ResourceData) bool { + if old == "REPLACE" && new == "RESTART" { + return true + } + if old == "RESTART" && new == "REPLACE" { + return true + } + return false + }, + }, + + "target_pools": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: selfLinkRelativePathHash, + }, + + "target_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Optional: true, + }, + + "auto_healing_policies": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "health_check": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + + "initial_delay_sec": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 3600), + }, + }, + }, + }, + + "rolling_update_policy": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. 
See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimal_action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), + }, + + "max_surge_fixed": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 1, + ConflictsWith: []string{"rolling_update_policy.0.max_surge_percent"}, + }, + + "max_surge_percent": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"rolling_update_policy.0.max_surge_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "max_unavailable_fixed": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 1, + ConflictsWith: []string{"rolling_update_policy.0.max_unavailable_percent"}, + }, + + "max_unavailable_percent": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"rolling_update_policy.0.max_unavailable_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "min_ready_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + }, + }, + }, + }, + + "wait_for_instances": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func getNamedPorts(nps []interface{}) []*compute.NamedPort { + namedPorts := make([]*compute.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &compute.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + + return namedPorts +} + +func getNamedPortsBeta(nps []interface{}) []*computeBeta.NamedPort { + 
namedPorts := make([]*computeBeta.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &computeBeta.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + + return namedPorts +} + +func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + + if _, ok := d.GetOk("rolling_update_policy"); d.Get("update_strategy") == "ROLLING_UPDATE" && !ok { + return fmt.Errorf("[rolling_update_policy] must be set when 'update_strategy' is set to 'ROLLING_UPDATE'") + } + + // Build the parameter + manager := &computeBeta.InstanceGroupManager{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + BaseInstanceName: d.Get("base_instance_name").(string), + InstanceTemplate: d.Get("instance_template").(string), + TargetSize: int64(d.Get("target_size").(int)), + NamedPorts: getNamedPortsBeta(d.Get("named_port").([]interface{})), + TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), + AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), + Versions: expandVersions(d.Get("version").([]interface{})), + // Force send TargetSize to allow a value of 0. 
+ ForceSendFields: []string{"TargetSize"}, + } + + log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) + op, err := config.clientComputeBeta.InstanceGroupManagers.Insert( + project, zone, manager).Do() + + if err != nil { + return fmt.Errorf("Error creating InstanceGroupManager: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(manager.Name) + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Creating InstanceGroupManager") + if err != nil { + return err + } + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func flattenNamedPortsBeta(namedPorts []*computeBeta.NamedPort) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result + +} + +func flattenVersions(versions []*computeBeta.InstanceGroupManagerVersion) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(versions)) + for _, version := range versions { + versionMap := make(map[string]interface{}) + versionMap["name"] = version.Name + versionMap["instance_template"] = ConvertSelfLinkToV1(version.InstanceTemplate) + versionMap["target_size"] = flattenFixedOrPercent(version.TargetSize) + result = append(result, versionMap) + } + + return result +} + +func flattenFixedOrPercent(fixedOrPercent *computeBeta.FixedOrPercent) []map[string]interface{} { + result := make(map[string]interface{}) + if value := fixedOrPercent.Percent; value > 0 { + result["percent"] = value + } else if value := fixedOrPercent.Fixed; value > 0 { + result["fixed"] = fixedOrPercent.Fixed + } else { + return []map[string]interface{}{} + } + return []map[string]interface{}{result} +} + +func getManager(d *schema.ResourceData, meta interface{}) 
(*computeBeta.InstanceGroupManager, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + region, err := getRegion(d, config) + if err != nil { + return nil, err + } + + getInstanceGroupManager := func(zone string) (interface{}, error) { + return config.clientComputeBeta.InstanceGroupManagers.Get(project, zone, d.Id()).Do() + } + + var manager *computeBeta.InstanceGroupManager + var e error + if zone, _ := getZone(d, config); zone != "" { + manager, e = config.clientComputeBeta.InstanceGroupManagers.Get(project, zone, d.Id()).Do() + + if e != nil { + return nil, handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) + } + } else { + // If the resource was imported, the only info we have is the ID. Try to find the resource + // by searching in the region of the project. + var resource interface{} + resource, e = getZonalBetaResourceFromRegion(getInstanceGroupManager, region, config.clientComputeBeta, project) + if e != nil { + return nil, e + } + + manager = resource.(*computeBeta.InstanceGroupManager) + } + + if manager == nil { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) + + // The resource doesn't exist anymore + d.SetId("") + return nil, nil + } + + return manager, nil +} + +func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + manager, err := getManager(d, meta) + if err != nil || manager == nil { + return err + } + + d.Set("base_instance_name", manager.BaseInstanceName) + d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate)) + if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { + return err + } + d.Set("name", manager.Name) + d.Set("zone", GetResourceNameFromSelfLink(manager.Zone)) + d.Set("description", 
manager.Description) + d.Set("project", project) + d.Set("target_size", manager.TargetSize) + d.Set("target_pools", manager.TargetPools) + d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)) + d.Set("fingerprint", manager.Fingerprint) + d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)) + d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) + update_strategy, ok := d.GetOk("update_strategy") + if !ok { + update_strategy = "REPLACE" + } + d.Set("update_strategy", update_strategy.(string)) + d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)) + + if d.Get("wait_for_instances").(bool) { + conf := resource.StateChangeConf{ + Pending: []string{"creating", "error"}, + Target: []string{"created"}, + Refresh: waitForInstancesRefreshFunc(getManager, d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + } + _, err := conf.WaitForState() + if err != nil { + return err + } + } + + return nil +} + +// Updates an instance group manager by applying the update strategy (REPLACE, RESTART) +// and rolling update policy (PROACTIVE, OPPORTUNISTIC). Updates performed by API +// are OPPORTUNISTIC by default. 
+func performZoneUpdate(config *Config, id string, updateStrategy string, rollingUpdatePolicy *computeBeta.InstanceGroupManagerUpdatePolicy, versions []*computeBeta.InstanceGroupManagerVersion, project string, zone string) error { + if updateStrategy == "RESTART" || updateStrategy == "REPLACE" { + managedInstances, err := config.clientComputeBeta.InstanceGroupManagers.ListManagedInstances(project, zone, id).Do() + if err != nil { + return fmt.Errorf("Error getting instance group managers instances: %s", err) + } + + managedInstanceCount := len(managedInstances.ManagedInstances) + instances := make([]string, managedInstanceCount) + for i, v := range managedInstances.ManagedInstances { + instances[i] = v.Instance + } + + recreateInstances := &computeBeta.InstanceGroupManagersRecreateInstancesRequest{ + Instances: instances, + } + + op, err := config.clientComputeBeta.InstanceGroupManagers.RecreateInstances(project, zone, id, recreateInstances).Do() + if err != nil { + return fmt.Errorf("Error restarting instance group managers instances: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWaitTime(config.clientCompute, op, project, managedInstanceCount*4, "Restarting InstanceGroupManagers instances") + if err != nil { + return err + } + } + + if updateStrategy == "ROLLING_UPDATE" { + // UpdatePolicy is set for InstanceGroupManager on update only, because it is only relevant for `Patch` calls. + // Other tools(gcloud and UI) capable of executing the same `ROLLING UPDATE` call + // expect those values to be provided by user as part of the call + // or provide their own defaults without respecting what was previously set on UpdateManager. + // To follow the same logic, we provide policy values on relevant update change only. 
+ manager := &computeBeta.InstanceGroupManager{ + UpdatePolicy: rollingUpdatePolicy, + Versions: versions, + } + + op, err := config.clientComputeBeta.InstanceGroupManagers.Patch(project, zone, id, manager).Do() + if err != nil { + return fmt.Errorf("Error updating managed group instances: %s", err) + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating managed group instances") + if err != nil { + return err + } + } + + return nil +} + +func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + + d.Partial(true) + + if _, ok := d.GetOk("rolling_update_policy"); d.Get("update_strategy") == "ROLLING_UPDATE" && !ok { + return fmt.Errorf("[rolling_update_policy] must be set when 'update_strategy' is set to 'ROLLING_UPDATE'") + } + + // If target_pools changes then update + if d.HasChange("target_pools") { + targetPools := convertStringSet(d.Get("target_pools").(*schema.Set)) + + // Build the parameter + setTargetPools := &computeBeta.InstanceGroupManagersSetTargetPoolsRequest{ + Fingerprint: d.Get("fingerprint").(string), + TargetPools: targetPools, + } + + op, err := config.clientComputeBeta.InstanceGroupManagers.SetTargetPools( + project, zone, d.Id(), setTargetPools).Do() + + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("target_pools") + } + + // If named_port changes then update: + if d.HasChange("named_port") { + + // Build the parameters for a "SetNamedPorts" request: + namedPorts := getNamedPortsBeta(d.Get("named_port").([]interface{})) + setNamedPorts := 
&computeBeta.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + // Make the request: + op, err := config.clientComputeBeta.InstanceGroups.SetNamedPorts( + project, zone, d.Id(), setNamedPorts).Do() + + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete: + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("named_port") + } + + if d.HasChange("target_size") { + targetSize := int64(d.Get("target_size").(int)) + op, err := config.clientComputeBeta.InstanceGroupManagers.Resize( + project, zone, d.Id(), targetSize).Do() + + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("target_size") + } + + // We will always be in v0beta inside this conditional + if d.HasChange("auto_healing_policies") { + setAutoHealingPoliciesRequest := &computeBeta.InstanceGroupManagersSetAutoHealingRequest{} + if v, ok := d.GetOk("auto_healing_policies"); ok { + setAutoHealingPoliciesRequest.AutoHealingPolicies = expandAutoHealingPolicies(v.([]interface{})) + } + + op, err := config.clientComputeBeta.InstanceGroupManagers.SetAutoHealingPolicies( + project, zone, d.Id(), setAutoHealingPoliciesRequest).Do() + + if err != nil { + return fmt.Errorf("Error updating AutoHealingPolicies: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating AutoHealingPolicies") + if err != nil { + return err + } + + d.SetPartial("auto_healing_policies") + } + + // If instance_template changes then update + if d.HasChange("instance_template") { + // Build the parameter + setInstanceTemplate := 
&computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{ + InstanceTemplate: d.Get("instance_template").(string), + } + + op, err := config.clientComputeBeta.InstanceGroupManagers.SetInstanceTemplate(project, zone, d.Id(), setInstanceTemplate).Do() + + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager") + if err != nil { + return err + } + + updateStrategy := d.Get("update_strategy").(string) + rollingUpdatePolicy := expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{})) + err = performZoneUpdate(config, d.Id(), updateStrategy, rollingUpdatePolicy, nil, project, zone) + d.SetPartial("instance_template") + } + + // If version changes then update + if d.HasChange("version") { + updateStrategy := d.Get("update_strategy").(string) + rollingUpdatePolicy := expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{})) + versions := expandVersions(d.Get("version").([]interface{})) + err = performZoneUpdate(config, d.Id(), updateStrategy, rollingUpdatePolicy, versions, project, zone) + if err != nil { + return err + } + + d.SetPartial("version") + } + + d.Partial(false) + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + + op, err := config.clientComputeBeta.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + attempt := 0 + for err != nil && attempt < 20 { + attempt++ + time.Sleep(2000 * time.Millisecond) + op, err = config.clientComputeBeta.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + } + + if err != nil { + return fmt.Errorf("Error deleting instance group manager: %s", 
err) + } + + currentSize := int64(d.Get("target_size").(int)) + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Deleting InstanceGroupManager") + + for err != nil && currentSize > 0 { + if !strings.Contains(err.Error(), "timeout") { + return err + } + + instanceGroup, err := config.clientComputeBeta.InstanceGroups.Get( + project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error getting instance group size: %s", err) + } + + instanceGroupSize := instanceGroup.Size + + if instanceGroupSize >= currentSize { + return fmt.Errorf("Error, instance group isn't shrinking during delete") + } + + log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroupSize, currentSize) + currentSize = instanceGroupSize + err = computeSharedOperationWait(config.clientCompute, op, project, "Deleting InstanceGroupManager") + } + + d.SetId("") + return nil +} + +func expandAutoHealingPolicies(configured []interface{}) []*computeBeta.InstanceGroupManagerAutoHealingPolicy { + autoHealingPolicies := make([]*computeBeta.InstanceGroupManagerAutoHealingPolicy, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + autoHealingPolicy := computeBeta.InstanceGroupManagerAutoHealingPolicy{ + HealthCheck: data["health_check"].(string), + InitialDelaySec: int64(data["initial_delay_sec"].(int)), + } + + autoHealingPolicies = append(autoHealingPolicies, &autoHealingPolicy) + } + return autoHealingPolicies +} + +func expandVersions(configured []interface{}) []*computeBeta.InstanceGroupManagerVersion { + versions := make([]*computeBeta.InstanceGroupManagerVersion, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + + version := computeBeta.InstanceGroupManagerVersion{ + Name: data["name"].(string), + InstanceTemplate: data["instance_template"].(string), + TargetSize: 
expandFixedOrPercent(data["target_size"].([]interface{})), + } + + versions = append(versions, &version) + } + return versions +} + +func expandFixedOrPercent(configured []interface{}) *computeBeta.FixedOrPercent { + fixedOrPercent := &computeBeta.FixedOrPercent{} + + for _, raw := range configured { + data := raw.(map[string]interface{}) + if percent := data["percent"]; percent.(int) > 0 { + fixedOrPercent.Percent = int64(percent.(int)) + } else { + fixedOrPercent.Fixed = int64(data["fixed"].(int)) + fixedOrPercent.ForceSendFields = []string{"Fixed"} + } + } + return fixedOrPercent +} + +func expandUpdatePolicy(configured []interface{}) *computeBeta.InstanceGroupManagerUpdatePolicy { + updatePolicy := &computeBeta.InstanceGroupManagerUpdatePolicy{} + + for _, raw := range configured { + data := raw.(map[string]interface{}) + + updatePolicy.MinimalAction = data["minimal_action"].(string) + updatePolicy.Type = data["type"].(string) + + // percent and fixed values are conflicting + // when the percent values are set, the fixed values will be ignored + if v := data["max_surge_percent"]; v.(int) > 0 { + updatePolicy.MaxSurge = &computeBeta.FixedOrPercent{ + Percent: int64(v.(int)), + } + } else { + updatePolicy.MaxSurge = &computeBeta.FixedOrPercent{ + Fixed: int64(data["max_surge_fixed"].(int)), + // allow setting this value to 0 + ForceSendFields: []string{"Fixed"}, + } + } + + if v := data["max_unavailable_percent"]; v.(int) > 0 { + updatePolicy.MaxUnavailable = &computeBeta.FixedOrPercent{ + Percent: int64(v.(int)), + } + } else { + updatePolicy.MaxUnavailable = &computeBeta.FixedOrPercent{ + Fixed: int64(data["max_unavailable_fixed"].(int)), + // allow setting this value to 0 + ForceSendFields: []string{"Fixed"}, + } + } + + if v, ok := data["min_ready_sec"]; ok { + updatePolicy.MinReadySec = int64(v.(int)) + } + } + return updatePolicy +} + +func flattenAutoHealingPolicies(autoHealingPolicies []*computeBeta.InstanceGroupManagerAutoHealingPolicy) 
[]map[string]interface{} { + autoHealingPoliciesSchema := make([]map[string]interface{}, 0, len(autoHealingPolicies)) + for _, autoHealingPolicy := range autoHealingPolicies { + data := map[string]interface{}{ + "health_check": autoHealingPolicy.HealthCheck, + "initial_delay_sec": autoHealingPolicy.InitialDelaySec, + } + + autoHealingPoliciesSchema = append(autoHealingPoliciesSchema, data) + } + return autoHealingPoliciesSchema +} + +func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("wait_for_instances", false) + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_compute_instance_group_migrate.go b/provider/terraform/resources/resource_compute_instance_group_migrate.go new file mode 100644 index 000000000000..fc919265f16b --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_group_migrate.go @@ -0,0 +1,89 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceGroupMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1") + is, err := migrateInstanceGroupStateV0toV1(is) + if err != nil { + return is, err + } + fallthrough + case 1: + log.Println("[INFO] Found Compute Instance Group State v1; migrating to v2") + is, err := migrateInstanceGroupStateV1toV2(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateInstanceGroupStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before 
migration: %#v", is.Attributes) + + newInstances := []string{} + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "instances.") { + continue + } + + if k == "instances.#" { + continue + } + + // Key is now of the form instances.%d + kParts := strings.Split(k, ".") + + // Sanity check: two parts should be there and should be a number + badFormat := false + if len(kParts) != 2 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf("migration error: found instances key in unexpected format: %s", k) + } + + newInstances = append(newInstances, v) + delete(is.Attributes, k) + } + + for _, v := range newInstances { + hash := schema.HashString(v) + newKey := fmt.Sprintf("instances.%d", hash) + is.Attributes[newKey] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateInstanceGroupStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + is.ID = fmt.Sprintf("%s/%s", is.Attributes["zone"], is.Attributes["name"]) + + return is, nil +} diff --git a/provider/terraform/resources/resource_compute_instance_migrate.go b/provider/terraform/resources/resource_compute_instance_migrate.go new file mode 100644 index 000000000000..b69b4c4a3806 --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_migrate.go @@ -0,0 +1,512 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + var err error + + switch v { + case 0: 
+ log.Println("[INFO] Found Compute Instance State v0; migrating to v1") + is, err = migrateStateV0toV1(is) + if err != nil { + return is, err + } + fallthrough + case 1: + log.Println("[INFO] Found Compute Instance State v1; migrating to v2") + is, err = migrateStateV1toV2(is) + if err != nil { + return is, err + } + fallthrough + case 2: + log.Println("[INFO] Found Compute Instance State v2; migrating to v3") + is, err = migrateStateV2toV3(is) + if err != nil { + return is, err + } + fallthrough + case 3: + log.Println("[INFO] Found Compute Instance State v3; migrating to v4") + is, err = migrateStateV3toV4(is, meta) + if err != nil { + return is, err + } + fallthrough + case 4: + log.Println("[INFO] Found Compute Instance State v4; migrating to v5") + is, err = migrateStateV4toV5(is, meta) + if err != nil { + return is, err + } + fallthrough + case 5: + log.Println("[INFO] Found Compute Instance State v5; migrating to v6") + is, err = migrateStateV5toV6(is) + if err != nil { + return is, err + } + // when adding case 6, make sure to turn this into a fallthrough + return is, err + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Delete old count + delete(is.Attributes, "metadata.#") + + newMetadata := make(map[string]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "metadata.") { + continue + } + + // We have a key that looks like "metadata.*" and we know it's not + // metadata.# because we deleted it above, so it must be metadata.. + // from the List of Maps. Just need to convert it to a single Map by + // ditching the '' field. 
+ kParts := strings.SplitN(k, ".", 3) + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 3 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found metadata key in unexpected format: %s", k) + } + + // Rejoin as "metadata." + newK := strings.Join([]string{kParts[0], kParts[2]}, ".") + newMetadata[newK] = v + delete(is.Attributes, k) + } + + for k, v := range newMetadata { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Maps service account index to list of scopes for that sccount + newScopesMap := make(map[string][]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "service_account.") { + continue + } + + if k == "service_account.#" { + continue + } + + if strings.HasSuffix(k, ".scopes.#") { + continue + } + + if strings.HasSuffix(k, ".email") { + continue + } + + // Key is now of the form service_account.%d.scopes.%d + kParts := strings.Split(k, ".") + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 4 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found scope key in unexpected format: %s", k) + } + + newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v) + + delete(is.Attributes, k) + } + + for service_acct_index, newScopes := range newScopesMap { + for _, newScope := range newScopes { + hash := hashcode.String(canonicalizeServiceScope(newScope)) + newKey := fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash) + 
is.Attributes[newKey] = newScope + } + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + is.Attributes["create_timeout"] = "4" + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV3toV4(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Read instance from GCP. Since disks are not necessarily returned from the API in the order they were set, + // we have no other way to know which source belongs to which attached disk. + // Also note that the following code modifies the returned instance- if you need immutability, please change + // this to make a copy of the needed data. + config := meta.(*Config) + instance, err := getInstanceFromInstanceState(config, is) + if err != nil { + return is, fmt.Errorf("migration error: %s", err) + } + diskList, err := getAllDisksFromInstanceState(config, is) + if err != nil { + return is, fmt.Errorf("migration error: %s", err) + } + allDisks := make(map[string]*compute.Disk) + for _, disk := range diskList { + allDisks[disk.Name] = disk + } + + hasBootDisk := is.Attributes["boot_disk.#"] == "1" + + scratchDisks := 0 + if v := is.Attributes["scratch_disk.#"]; v != "" { + scratchDisks, err = strconv.Atoi(v) + if err != nil { + return is, fmt.Errorf("migration error: found scratch_disk.# value in unexpected format: %s", err) + } + } + + attachedDisks := 0 + if v := is.Attributes["attached_disk.#"]; v != "" { + attachedDisks, err = strconv.Atoi(v) + if err != nil { + return is, fmt.Errorf("migration error: found attached_disk.# value in unexpected format: %s", err) + } + } + + disks, err := strconv.Atoi(is.Attributes["disk.#"]) + if err != nil { + return is, 
fmt.Errorf("migration error: found disk.# value in unexpected format: %s", err) + } + + for i := 0; i < disks; i++ { + if !hasBootDisk && i == 0 { + is.Attributes["boot_disk.#"] = "1" + + // Note: the GCP API does not allow for scratch disks to be boot disks, so this situation + // should never occur. + if is.Attributes["disk.0.scratch_disk"] == "true" { + return is, fmt.Errorf("migration error: found scratch disk at index 0") + } + + for _, disk := range instance.Disks { + if disk.Boot { + is.Attributes["boot_disk.0.source"] = GetResourceNameFromSelfLink(disk.Source) + is.Attributes["boot_disk.0.device_name"] = disk.DeviceName + break + } + } + is.Attributes["boot_disk.0.auto_delete"] = is.Attributes["disk.0.auto_delete"] + is.Attributes["boot_disk.0.disk_encryption_key_raw"] = is.Attributes["disk.0.disk_encryption_key_raw"] + is.Attributes["boot_disk.0.disk_encryption_key_sha256"] = is.Attributes["disk.0.disk_encryption_key_sha256"] + + if is.Attributes["disk.0.size"] != "" && is.Attributes["disk.0.size"] != "0" { + is.Attributes["boot_disk.0.initialize_params.#"] = "1" + is.Attributes["boot_disk.0.initialize_params.0.size"] = is.Attributes["disk.0.size"] + } + if is.Attributes["disk.0.type"] != "" { + is.Attributes["boot_disk.0.initialize_params.#"] = "1" + is.Attributes["boot_disk.0.initialize_params.0.type"] = is.Attributes["disk.0.type"] + } + if is.Attributes["disk.0.image"] != "" { + is.Attributes["boot_disk.0.initialize_params.#"] = "1" + is.Attributes["boot_disk.0.initialize_params.0.image"] = is.Attributes["disk.0.image"] + } + } else if is.Attributes[fmt.Sprintf("disk.%d.scratch", i)] == "true" { + // Note: the GCP API does not allow for scratch disks without auto_delete, so this situation + // should never occur. 
+ if is.Attributes[fmt.Sprintf("disk.%d.auto_delete", i)] != "true" { + return is, fmt.Errorf("migration error: attempted to migrate scratch disk where auto_delete is not true") + } + + is.Attributes[fmt.Sprintf("scratch_disk.%d.interface", scratchDisks)] = "SCSI" + + scratchDisks++ + } else { + // If disk is neither boot nor scratch, then it is attached. + + disk, err := getDiskFromAttributes(config, instance, allDisks, is.Attributes, i) + if err != nil { + return is, fmt.Errorf("migration error: %s", err) + } + + is.Attributes[fmt.Sprintf("attached_disk.%d.source", attachedDisks)] = disk.Source + is.Attributes[fmt.Sprintf("attached_disk.%d.device_name", attachedDisks)] = disk.DeviceName + is.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", attachedDisks)] = is.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)] + is.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_sha256", attachedDisks)] = is.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)] + + attachedDisks++ + } + } + + for k, _ := range is.Attributes { + if !strings.HasPrefix(k, "disk.") { + continue + } + + delete(is.Attributes, k) + } + if scratchDisks > 0 { + is.Attributes["scratch_disk.#"] = strconv.Itoa(scratchDisks) + } + if attachedDisks > 0 { + is.Attributes["attached_disk.#"] = strconv.Itoa(attachedDisks) + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV4toV5(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if v := is.Attributes["disk.#"]; v != "" { + return migrateStateV3toV4(is, meta) + } + return is, nil +} + +func getInstanceFromInstanceState(config *Config, is *terraform.InstanceState) (*compute.Instance, error) { + project, ok := is.Attributes["project"] + if !ok { + if config.Project == "" { + return nil, fmt.Errorf("could not determine 'project'") + } else { + project = config.Project + } + } + + zone, ok := is.Attributes["zone"] + if 
!ok { + return nil, fmt.Errorf("could not determine 'zone'") + } + + instance, err := config.clientCompute.Instances.Get( + project, zone, is.ID).Do() + if err != nil { + return nil, fmt.Errorf("error reading instance: %s", err) + } + + return instance, nil +} + +func getAllDisksFromInstanceState(config *Config, is *terraform.InstanceState) ([]*compute.Disk, error) { + project, ok := is.Attributes["project"] + if !ok { + if config.Project == "" { + return nil, fmt.Errorf("could not determine 'project'") + } else { + project = config.Project + } + } + + zone, ok := is.Attributes["zone"] + if !ok { + return nil, fmt.Errorf("could not determine 'zone'") + } + + diskList := []*compute.Disk{} + token := "" + for { + disks, err := config.clientCompute.Disks.List(project, zone).PageToken(token).Do() + if err != nil { + return nil, fmt.Errorf("error reading disks: %s", err) + } + diskList = append(diskList, disks.Items...) + token = disks.NextPageToken + if token == "" { + break + } + } + + return diskList, nil +} + +func getDiskFromAttributes(config *Config, instance *compute.Instance, allDisks map[string]*compute.Disk, attributes map[string]string, i int) (*compute.AttachedDisk, error) { + if diskSource := attributes[fmt.Sprintf("disk.%d.disk", i)]; diskSource != "" { + return getDiskFromSource(instance, diskSource) + } + + if deviceName := attributes[fmt.Sprintf("disk.%d.device_name", i)]; deviceName != "" { + return getDiskFromDeviceName(instance, deviceName) + } + + if encryptionKey := attributes[fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)]; encryptionKey != "" { + return getDiskFromEncryptionKey(instance, encryptionKey) + } + + autoDelete, err := strconv.ParseBool(attributes[fmt.Sprintf("disk.%d.auto_delete", i)]) + if err != nil { + return nil, fmt.Errorf("error parsing auto_delete attribute of disk %d", i) + } + image := attributes[fmt.Sprintf("disk.%d.image", i)] + + // We know project and zone are set because we used them to read the instance + project, ok 
:= attributes["project"] + if !ok { + project = config.Project + } + zone := attributes["zone"] + return getDiskFromAutoDeleteAndImage(config, instance, allDisks, autoDelete, image, project, zone) +} + +func getDiskFromSource(instance *compute.Instance, source string) (*compute.AttachedDisk, error) { + for _, disk := range instance.Disks { + if disk.Boot == true || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + // we can just compare suffixes because terraform only allows setting "disk" by name and uses + // the zone of the instance so we know there can be no duplicate names. + if strings.HasSuffix(disk.Source, "/"+source) { + return disk, nil + } + } + return nil, fmt.Errorf("could not find attached disk with source %q", source) +} + +func getDiskFromDeviceName(instance *compute.Instance, deviceName string) (*compute.AttachedDisk, error) { + for _, disk := range instance.Disks { + if disk.Boot == true || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.DeviceName == deviceName { + return disk, nil + } + } + return nil, fmt.Errorf("could not find attached disk with deviceName %q", deviceName) +} + +func getDiskFromEncryptionKey(instance *compute.Instance, encryptionKey string) (*compute.AttachedDisk, error) { + encryptionSha, err := hash256(encryptionKey) + if err != nil { + return nil, err + } + for _, disk := range instance.Disks { + if disk.Boot == true || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.DiskEncryptionKey.Sha256 == encryptionSha { + return disk, nil + } + } + return nil, fmt.Errorf("could not find attached disk with encryption hash %q", encryptionSha) +} + +func getDiskFromAutoDeleteAndImage(config *Config, instance *compute.Instance, allDisks map[string]*compute.Disk, autoDelete bool, image, project, zone string) 
(*compute.AttachedDisk, error) { + img, err := resolveImage(config, project, image) + if err != nil { + return nil, err + } + imgParts := strings.Split(img, "/projects/") + canonicalImage := imgParts[len(imgParts)-1] + + for i, disk := range instance.Disks { + if disk.Boot == true || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.AutoDelete == autoDelete { + // Read the disk to check if its image matches + fullDisk := allDisks[GetResourceNameFromSelfLink(disk.Source)] + sourceImage, err := getRelativePath(fullDisk.SourceImage) + if err != nil { + return nil, err + } + if canonicalImage == sourceImage { + // Delete this disk because there might be multiple that match + instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...) + return disk, nil + } + } + } + + // We're not done! It's possible the disk was created with an image family rather than the image itself. + // Now, do the exact same iteration but do some prefix matching to check if the families match. + // This assumes that all disks with a given family have a sourceImage whose name starts with the name of + // the image family. + canonicalImage = strings.Replace(canonicalImage, "/family/", "/", -1) + for i, disk := range instance.Disks { + if disk.Boot == true || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.AutoDelete == autoDelete { + // Read the disk to check if its image matches + fullDisk := allDisks[GetResourceNameFromSelfLink(disk.Source)] + sourceImage, err := getRelativePath(fullDisk.SourceImage) + if err != nil { + return nil, err + } + + if strings.Contains(sourceImage, "/"+canonicalImage+"-") { + // Delete this disk because there might be multiple that match + instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...) 
+ return disk, nil + } + } + } + + return nil, fmt.Errorf("could not find attached disk with image %q", image) +} + +func migrateStateV5toV6(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + if is.Attributes["boot_disk.0.initialize_params.#"] == "1" { + if (is.Attributes["boot_disk.0.initialize_params.0.size"] == "0" || + is.Attributes["boot_disk.0.initialize_params.0.size"] == "") && + is.Attributes["boot_disk.0.initialize_params.0.type"] == "" && + is.Attributes["boot_disk.0.initialize_params.0.image"] == "" { + is.Attributes["boot_disk.0.initialize_params.#"] = "0" + delete(is.Attributes, "boot_disk.0.initialize_params.0.size") + delete(is.Attributes, "boot_disk.0.initialize_params.0.type") + delete(is.Attributes, "boot_disk.0.initialize_params.0.image") + } + } + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/provider/terraform/resources/resource_compute_instance_template.go b/provider/terraform/resources/resource_compute_instance_template.go new file mode 100644 index 000000000000..854616fd04b1 --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_template.go @@ -0,0 +1,864 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/googleapi" +) + +func resourceComputeInstanceTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceTemplateCreate, + Read: resourceComputeInstanceTemplateRead, + Delete: resourceComputeInstanceTemplateDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + CustomizeDiff: resourceComputeInstanceTemplateSourceImageCustomizeDiff, + 
MigrateState: resourceComputeInstanceTemplateMigrateState, + + // A compute instance template is more or less a subset of a compute + // instance. Please attempt to maintain consistency with the + // resource_compute_instance schema when updating this one. + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: validateGCPName, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + // uuid is 26 characters, limit the prefix to 37. + value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + + "disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "boot": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "disk_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "disk_size_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "disk_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "source_image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + 
ForceNew: true, + Computed: true, + }, + + "mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + }, + }, + + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Removed: "Use 'scheduling.automatic_restart' instead.", + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "instance_description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, // Computed because it is set if network_ip is set. + Optional: true, + ForceNew: true, + }, + + "network_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, // Computed because it is set if address is set. 
+ Optional: true, + ForceNew: true, + Deprecated: "Please use address", + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "subnetwork_project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "network_tier": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), + }, + // Instance templates will never have an + // 'assigned NAT IP', but we need this in + // the schema to allow us to share flatten + // code with an instance, which could. 
+ "assigned_nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Deprecated: "Use network_interface.access_config.nat_ip instead.", + }, + }, + }, + }, + + "alias_ip_range": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: ipCidrRangeDiffSuppress, + }, + "subnetwork_range_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + }, + }, + }, + + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Removed: "Use 'scheduling.on_host_maintenance' instead.", + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "scheduling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "service_account": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "scopes": &schema.Schema{ + Type: 
schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + Set: stringScopeHashcode, + }, + }, + }, + }, + + "guest_accelerator": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, + }, + }, + }, + }, + + "min_cpu_platform": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "tags_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceComputeInstanceTemplateSourceImageCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { + config := meta.(*Config) + project, err := getProjectFromDiff(diff, config) + if err != nil { + return err + } + numDisks := diff.Get("disk.#").(int) + for i := 0; i < numDisks; i++ { + key := fmt.Sprintf("disk.%d.source_image", i) + if diff.HasChange(key) { + old, new := diff.GetChange(key) + if old == "" || new == "" { + // no sense in resolving empty strings + err = diff.ForceNew(key) + if err != nil { + return err + } + continue + } + oldResolved, err := resolveImage(config, project, old.(string)) + if err != nil { + return err + } + oldResolved, err = resolvedImageSelfLink(project, oldResolved) + if err != nil { + return err + } + newResolved, err := 
resolveImage(config, project, new.(string)) + if err != nil { + return err + } + newResolved, err = resolvedImageSelfLink(project, newResolved) + if err != nil { + return err + } + if oldResolved != newResolved { + err = diff.ForceNew(key) + if err != nil { + return err + } + continue + } + err = diff.Clear(key) + if err != nil { + return err + } + } + } + return nil +} + +func buildDisks(d *schema.ResourceData, config *Config) ([]*computeBeta.AttachedDisk, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + disksCount := d.Get("disk.#").(int) + + disks := make([]*computeBeta.AttachedDisk, 0, disksCount) + for i := 0; i < disksCount; i++ { + prefix := fmt.Sprintf("disk.%d", i) + + // Build the disk + var disk computeBeta.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Interface = "SCSI" + disk.Boot = i == 0 + disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) + + if v, ok := d.GetOk(prefix + ".boot"); ok { + disk.Boot = v.(bool) + } + + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk(prefix + ".source"); ok { + disk.Source = v.(string) + } else { + disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{} + + if v, ok := d.GetOk(prefix + ".disk_name"); ok { + disk.InitializeParams.DiskName = v.(string) + } + if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { + disk.InitializeParams.DiskSizeGb = int64(v.(int)) + } + disk.InitializeParams.DiskType = "pd-standard" + if v, ok := d.GetOk(prefix + ".disk_type"); ok { + disk.InitializeParams.DiskType = v.(string) + } + + if v, ok := d.GetOk(prefix + ".source_image"); ok { + imageName := v.(string) + imageUrl, err := resolveImage(config, project, imageName) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + imageName, err) + } + disk.InitializeParams.SourceImage = imageUrl + } + } + + if v, ok := d.GetOk(prefix + ".interface"); ok { + 
disk.Interface = v.(string) + } + + if v, ok := d.GetOk(prefix + ".mode"); ok { + disk.Mode = v.(string) + } + + if v, ok := d.GetOk(prefix + ".type"); ok { + disk.Type = v.(string) + } + + disks = append(disks, &disk) + } + + return disks, nil +} + +// We don't share this code with compute instances because instances want a +// partial URL, but instance templates want the bare accelerator name (despite +// the docs saying otherwise). +// +// Using a partial URL on an instance template results in: +// Invalid value for field 'resource.properties.guestAccelerators[0].acceleratorType': +// 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80'. +// Accelerator type 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80' +// must be a valid resource name (not an url). +func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Config) []*computeBeta.AcceleratorConfig { + configs, ok := d.GetOk("guest_accelerator") + if !ok { + return nil + } + accels := configs.([]interface{}) + guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(accels)) + for _, raw := range accels { + data := raw.(map[string]interface{}) + if data["count"].(int) == 0 { + continue + } + guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{ + AcceleratorCount: int64(data["count"].(int)), + // We can't use ParseAcceleratorFieldValue here because an instance + // template does not have a zone we can use. 
+ AcceleratorType: data["type"].(string), + }) + } + + return guestAccelerators +} + +func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceProperties := &computeBeta.InstanceProperties{ + CanIpForward: d.Get("can_ip_forward").(bool), + Description: d.Get("instance_description").(string), + MachineType: d.Get("machine_type").(string), + MinCpuPlatform: d.Get("min_cpu_platform").(string), + } + + disks, err := buildDisks(d, config) + if err != nil { + return err + } + instanceProperties.Disks = disks + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return err + } + instanceProperties.Metadata = metadata + networks, err := expandNetworkInterfaces(d, config) + if err != nil { + return err + } + instanceProperties.NetworkInterfaces = networks + + instanceProperties.Scheduling = &computeBeta.Scheduling{} + instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" + + forceSendFieldsScheduling := make([]string, 0, 3) + var hasSendMaintenance bool + hasSendMaintenance = false + if v, ok := d.GetOk("scheduling"); ok { + _schedulings := v.([]interface{}) + if len(_schedulings) > 1 { + return fmt.Errorf("Error, at most one `scheduling` block can be defined") + } + _scheduling := _schedulings[0].(map[string]interface{}) + + // "automatic_restart" has a default value and is always safe to dereference + automaticRestart := _scheduling["automatic_restart"].(bool) + instanceProperties.Scheduling.AutomaticRestart = googleapi.Bool(automaticRestart) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "AutomaticRestart") + + if vp, okp := _scheduling["on_host_maintenance"]; okp { + instanceProperties.Scheduling.OnHostMaintenance = vp.(string) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") + hasSendMaintenance = true + } + + if vp, okp := 
_scheduling["preemptible"]; okp { + instanceProperties.Scheduling.Preemptible = vp.(bool) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "Preemptible") + if vp.(bool) && !hasSendMaintenance { + instanceProperties.Scheduling.OnHostMaintenance = "TERMINATE" + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") + } + } + } + instanceProperties.Scheduling.ForceSendFields = forceSendFieldsScheduling + + instanceProperties.ServiceAccounts = expandServiceAccounts(d.Get("service_account").([]interface{})) + + instanceProperties.GuestAccelerators = expandInstanceTemplateGuestAccelerators(d, config) + + instanceProperties.Tags = resourceInstanceTags(d) + if _, ok := d.GetOk("labels"); ok { + instanceProperties.Labels = expandLabels(d) + } + + var itName string + if v, ok := d.GetOk("name"); ok { + itName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + itName = resource.PrefixedUniqueId(v.(string)) + } else { + itName = resource.UniqueId() + } + instanceTemplate := &computeBeta.InstanceTemplate{ + Description: d.Get("description").(string), + Properties: instanceProperties, + Name: itName, + } + + op, err := config.clientComputeBeta.InstanceTemplates.Insert(project, instanceTemplate).Do() + if err != nil { + return fmt.Errorf("Error creating instance template: %s", err) + } + + // Store the ID now + d.SetId(instanceTemplate.Name) + + err = computeSharedOperationWait(config.clientCompute, op, project, "Creating Instance Template") + if err != nil { + return err + } + + return resourceComputeInstanceTemplateRead(d, meta) +} + +func flattenDisks(disks []*computeBeta.AttachedDisk, d *schema.ResourceData, defaultProject string) ([]map[string]interface{}, error) { + result := make([]map[string]interface{}, 0, len(disks)) + for _, disk := range disks { + diskMap := make(map[string]interface{}) + if disk.InitializeParams != nil { + if disk.InitializeParams.SourceImage != "" { + selfLink, err := 
resolvedImageSelfLink(defaultProject, disk.InitializeParams.SourceImage) + if err != nil { + return nil, errwrap.Wrapf("Error expanding source image input to self_link: {{err}}", err) + } + path, err := getRelativePath(selfLink) + if err != nil { + return nil, errwrap.Wrapf("Error getting relative path for source image: {{err}}", err) + } + diskMap["source_image"] = path + } else { + diskMap["source_image"] = "" + } + diskMap["disk_type"] = disk.InitializeParams.DiskType + diskMap["disk_name"] = disk.InitializeParams.DiskName + diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb + } + diskMap["auto_delete"] = disk.AutoDelete + diskMap["boot"] = disk.Boot + diskMap["device_name"] = disk.DeviceName + diskMap["interface"] = disk.Interface + diskMap["source"] = ConvertSelfLinkToV1(disk.Source) + diskMap["mode"] = disk.Mode + diskMap["type"] = disk.Type + result = append(result, diskMap) + } + return result, nil +} + +func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceTemplate, err := config.clientComputeBeta.InstanceTemplates.Get(project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) + } + + // Set the metadata fingerprint if there is one. 
+ if instanceTemplate.Properties.Metadata != nil { + if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + + md := instanceTemplate.Properties.Metadata + + _md := flattenMetadataBeta(md) + + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if err = d.Set("metadata_startup_script", script); err != nil { + return fmt.Errorf("Error setting metadata_startup_script: %s", err) + } + delete(_md, "startup-script") + } + if err = d.Set("metadata", _md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + } + + // Set the tags fingerprint if there is one. + if instanceTemplate.Properties.Tags != nil { + if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } else { + d.Set("tags_fingerprint", "") + } + if instanceTemplate.Properties.Labels != nil { + d.Set("labels", instanceTemplate.Properties.Labels) + } + if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err = d.Set("name", instanceTemplate.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if instanceTemplate.Properties.Disks != nil { + disks, err := flattenDisks(instanceTemplate.Properties.Disks, d, project) + if err != nil { + return fmt.Errorf("error flattening disks: %s", err) + } + if err = d.Set("disk", disks); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } + } + if err = d.Set("description", instanceTemplate.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + if err = d.Set("min_cpu_platform", 
instanceTemplate.Properties.MinCpuPlatform); err != nil { + return fmt.Errorf("Error setting min_cpu_platform: %s", err) + } + + if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + + if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { + return fmt.Errorf("Error setting instance_description: %s", err) + } + if err = d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if instanceTemplate.Properties.NetworkInterfaces != nil { + networkInterfaces, region, _, _, err := flattenNetworkInterfaces(d, config, instanceTemplate.Properties.NetworkInterfaces) + if err != nil { + return err + } + if err = d.Set("network_interface", networkInterfaces); err != nil { + return fmt.Errorf("Error setting network_interface: %s", err) + } + // region is where to look up the subnetwork if there is one attached to the instance template + if region != "" { + if err = d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + } + } + if instanceTemplate.Properties.Scheduling != nil { + scheduling := flattenScheduling(instanceTemplate.Properties.Scheduling) + if err = d.Set("scheduling", scheduling); err != nil { + return fmt.Errorf("Error setting scheduling: %s", err) + } + } + if instanceTemplate.Properties.Tags != nil { + if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } else { + if err = d.Set("tags", nil); err != nil { + return fmt.Errorf("Error setting empty tags: %s", err) + } + } + if instanceTemplate.Properties.ServiceAccounts != nil { + if err = d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil { + return fmt.Errorf("Error setting service_account: %s", err) + } + } + if instanceTemplate.Properties.GuestAccelerators 
!= nil { + if err = d.Set("guest_accelerator", flattenGuestAccelerators(instanceTemplate.Properties.GuestAccelerators)); err != nil { + return fmt.Errorf("Error setting guest_accelerator: %s", err) + } + } + return nil +} + +func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientCompute.InstanceTemplates.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance template: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Deleting Instance Template") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/provider/terraform/resources/resource_compute_instance_template_migrate.go b/provider/terraform/resources/resource_compute_instance_template_migrate.go new file mode 100644 index 000000000000..0b28502773b6 --- /dev/null +++ b/provider/terraform/resources/resource_compute_instance_template_migrate.go @@ -0,0 +1,58 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceTemplateMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance Template State v0; migrating to v1") + return migrateComputeInstanceTemplateStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateComputeInstanceTemplateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // automatic_restart is stored in two places. 
The top-level automatic_restart value is deprecated, so let's delete + // it from the state map for now. For paranoia's sake, we compare it to the value stored in scheduling as well. + ar := is.Attributes["automatic_restart"] + delete(is.Attributes, "automatic_restart") + + schedulingCount, ok := is.Attributes["scheduling.#"] + if ok && schedulingCount != "0" && schedulingCount != "1" { + return nil, fmt.Errorf("Found multiple scheduling blocks when there should only be one") + } + + if !ok || schedulingCount == "0" { + // Either scheduling is missing or empty; go ahead and add + is.Attributes["scheduling.#"] = "1" + is.Attributes["scheduling.0.automatic_restart"] = ar + } + + schedAr := is.Attributes["scheduling.0.automatic_restart"] + if ar != schedAr { + // Here we could try to choose one value over the other, but in reality they should never be out of sync; error + // for now + return nil, fmt.Errorf("Found differing values for automatic_restart in state, unsure how to proceed. automatic_restart = %#v, scheduling.0.automatic_restart = %#v", ar, schedAr) + } + + // We also nuke "on_host_maintenance" as it's been deprecated as well. Here we don't check the current value though + // as the authoritative value has always been maintained in the scheduling block. 
+ delete(is.Attributes, "on_host_maintenance") + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/provider/terraform/resources/resource_compute_network.go b/provider/terraform/resources/resource_compute_network.go new file mode 100644 index 000000000000..db1efeba9c88 --- /dev/null +++ b/provider/terraform/resources/resource_compute_network.go @@ -0,0 +1,205 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeNetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkCreate, + Read: resourceComputeNetworkRead, + Update: resourceComputeNetworkUpdate, + Delete: resourceComputeNetworkDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "auto_create_subnetworks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "routing_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + // This needs to remain deprecated until the API is retired + Removed: "Please use google_compute_subnetwork resources instead.", + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, 
err := getProject(d, config) + if err != nil { + return err + } + + // + // Possible modes: + // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be + // set (enforced by ConflictsWith schema attribute) + // - 2 Distributed Mode - Create a new generation network that supports subnetworks: + // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region + // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, + // + autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) + + // Build the network parameter + network := &compute.Network{ + Name: d.Get("name").(string), + AutoCreateSubnetworks: autoCreateSubnetworks, + Description: d.Get("description").(string), + } + + if v, ok := d.GetOk("routing_mode"); ok { + routingConfig := &compute.NetworkRoutingConfig{ + RoutingMode: v.(string), + } + network.RoutingConfig = routingConfig + } + + // make sure AutoCreateSubnetworks field is included in request otherwise + // google will create a network in legacy mode. 
+ network.ForceSendFields = []string{"AutoCreateSubnetworks"} + + log.Printf("[DEBUG] Network insert request: %#v", network) + op, err := config.clientCompute.Networks.Insert( + project, network).Do() + if err != nil { + return fmt.Errorf("Error creating network: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(network.Name) + + err = computeOperationWait(config.clientCompute, op, project, "Creating Network") + if err != nil { + return err + } + + return resourceComputeNetworkRead(d, meta) +} + +func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + network, err := config.clientCompute.Networks.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string))) + } + + routingConfig := network.RoutingConfig + + d.Set("routing_mode", routingConfig.RoutingMode) + d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("self_link", network.SelfLink) + d.Set("name", network.Name) + d.Set("description", network.Description) + d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks) + d.Set("project", project) + + return nil +} + +func resourceComputeNetworkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientCompute.Networks.Patch(project, d.Id(), &compute.Network{ + RoutingConfig: &compute.NetworkRoutingConfig{ + RoutingMode: d.Get("routing_mode").(string), + }, + }).Do() + + if err != nil { + return fmt.Errorf("Error updating network: %s", err) + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "UpdateNetwork") + if err != nil { + return err + } + + return resourceComputeNetworkRead(d, meta) +} + +func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + return deleteComputeNetwork(project, d.Id(), config) +} + +func deleteComputeNetwork(project, network string, config *Config) error { + op, err := config.clientCompute.Networks.Delete( + project, network).Do() + if err != nil { + return fmt.Errorf("Error deleting network: %s", err) + } + + err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting Network", 10) + if err != nil { + return err + } + return nil +} diff --git a/provider/terraform/resources/resource_compute_network_peering.go b/provider/terraform/resources/resource_compute_network_peering.go new file mode 100644 index 000000000000..bbb2dc7bc2fb --- /dev/null +++ b/provider/terraform/resources/resource_compute_network_peering.go @@ -0,0 +1,173 @@ +package google + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +const peerNetworkLinkRegex = "projects/(" + ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" + +func resourceComputeNetworkPeering() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkPeeringCreate, + Read: resourceComputeNetworkPeeringRead, + Delete: resourceComputeNetworkPeeringDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCPName, + }, + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRegexp(peerNetworkLinkRegex), + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + "peer_network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRegexp(peerNetworkLinkRegex), + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + "auto_create_routes": &schema.Schema{ + Type: schema.TypeBool, 
+ ForceNew: true, + Optional: true, + Default: true, + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "state_details": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeNetworkPeeringCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksAddPeeringRequest{ + Name: d.Get("name").(string), + PeerNetwork: d.Get("peer_network").(string), + AutoCreateRoutes: d.Get("auto_create_routes").(bool), + } + + addOp, err := config.clientCompute.Networks.AddPeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + return fmt.Errorf("Error adding network peering: %s", err) + } + + err = computeOperationWait(config.clientCompute, addOp, networkFieldValue.Project, "Adding Network Peering") + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) + + return resourceComputeNetworkPeeringRead(d, meta) +} + +func resourceComputeNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + peeringName := d.Get("name").(string) + networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + + network, err := config.clientCompute.Networks.Get(networkFieldValue.Project, networkFieldValue.Name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Network %q", networkFieldValue.Name)) + } + + peering := findPeeringFromNetwork(network, peeringName) + if peering == nil { + log.Printf("[WARN] Removing network peering %s from network %s because it's gone", peeringName, network.Name) + d.SetId("") + return nil + } + + d.Set("peer_network", peering.Network) + d.Set("auto_create_routes", peering.AutoCreateRoutes) + 
d.Set("state", peering.State) + d.Set("state_details", peering.StateDetails) + + return nil +} + +func resourceComputeNetworkPeeringDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Remove the `network` to `peer_network` peering + name := d.Get("name").(string) + networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksRemovePeeringRequest{ + Name: name, + } + + // Only one delete peering operation at a time can be performed inside any peered VPCs. + peeringLockName := getNetworkPeeringLockName(networkFieldValue.Name, peerNetworkFieldValue.Name) + mutexKV.Lock(peeringLockName) + defer mutexKV.Unlock(peeringLockName) + + removeOp, err := config.clientCompute.Networks.RemovePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Peering `%s` already removed from network `%s`", name, networkFieldValue.Name) + } else { + return fmt.Errorf("Error removing peering `%s` from network `%s`: %s", name, networkFieldValue.Name, err) + } + } else { + err = computeOperationWait(config.clientCompute, removeOp, networkFieldValue.Project, "Removing Network Peering") + if err != nil { + return err + } + } + + return nil +} + +func findPeeringFromNetwork(network *compute.Network, peeringName string) *compute.NetworkPeering { + for _, p := range network.Peerings { + if p.Name == peeringName { + return p + } + } + return nil +} + +func getNetworkPeeringLockName(networkName, peerNetworkName string) string { + // Whether you delete the peering from network A to B or the one from B to A, they + // cannot happen at the same time. 
+ networks := []string{networkName, peerNetworkName} + sort.Strings(networks) + + return fmt.Sprintf("network_peering/%s/%s", networks[0], networks[1]) +} diff --git a/provider/terraform/resources/resource_compute_project_metadata.go b/provider/terraform/resources/resource_compute_project_metadata.go new file mode 100644 index 000000000000..497005ffb80b --- /dev/null +++ b/provider/terraform/resources/resource_compute_project_metadata.go @@ -0,0 +1,119 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeProjectMetadata() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectMetadataCreateOrUpdate, + Read: resourceComputeProjectMetadataRead, + Update: resourceComputeProjectMetadataCreateOrUpdate, + Delete: resourceComputeProjectMetadataDelete, + + SchemaVersion: 0, + + Schema: map[string]*schema.Schema{ + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeProjectMetadataCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + md := &compute.Metadata{ + Items: expandComputeMetadata(d.Get("metadata").(map[string]interface{})), + } + + err = resourceComputeProjectMetadataSet(projectID, config, md) + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + return resourceComputeProjectMetadataRead(d, meta) +} + +func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + // Load project service + log.Printf("[DEBUG] Loading project 
service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID)) + } + + err = d.Set("metadata", flattenMetadata(project.CommonInstanceMetadata)) + if err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + + d.Set("project", projectID) + d.SetId("common_metadata") + return nil +} + +func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + md := &compute.Metadata{} + err = resourceComputeProjectMetadataSet(projectID, config, md) + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + return resourceComputeProjectMetadataRead(d, meta) +} + +func resourceComputeProjectMetadataSet(projectID string, config *Config, md *compute.Metadata) error { + createMD := func() error { + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md.Fingerprint = project.CommonInstanceMetadata.Fingerprint + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + return computeOperationWait(config.clientCompute, op, project.Name, "SetCommonMetadata") + } + + err := MetadataRetryWrapper(createMD) + return err +} diff --git a/provider/terraform/resources/resource_compute_project_metadata_item.go b/provider/terraform/resources/resource_compute_project_metadata_item.go new file mode 100644 index 000000000000..e3d2a6071eff --- /dev/null +++ 
b/provider/terraform/resources/resource_compute_project_metadata_item.go @@ -0,0 +1,180 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeProjectMetadataItem() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectMetadataItemCreate, + Read: resourceComputeProjectMetadataItemRead, + Update: resourceComputeProjectMetadataItemUpdate, + Delete: resourceComputeProjectMetadataItemDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeProjectMetadataItemCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + key := d.Get("key").(string) + val := d.Get("value").(string) + + err = updateComputeCommonInstanceMetadata(config, projectID, key, &val) + if err != nil { + return err + } + + d.SetId(key) + + return nil +} + +func resourceComputeProjectMetadataItemRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Loading project metadata: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md := flattenMetadata(project.CommonInstanceMetadata) + val, ok := md[d.Id()] + if !ok { + // Resource no longer exists + d.SetId("") + return nil + } + + d.Set("project", projectID) + d.Set("key", d.Id()) + d.Set("value", val) + + return nil +} + 
+func resourceComputeProjectMetadataItemUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + if d.HasChange("value") { + key := d.Get("key").(string) + _, n := d.GetChange("value") + new := n.(string) + + err = updateComputeCommonInstanceMetadata(config, projectID, key, &new) + if err != nil { + return err + } + } + return nil +} + +func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + key := d.Get("key").(string) + + err = updateComputeCommonInstanceMetadata(config, projectID, key, nil) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func updateComputeCommonInstanceMetadata(config *Config, projectID string, key string, afterVal *string) error { + updateMD := func() error { + log.Printf("[DEBUG] Loading project metadata: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md := flattenMetadata(project.CommonInstanceMetadata) + + val, ok := md[key] + + if !ok { + if afterVal == nil { + // Asked to set no value and we didn't find one - we're done + return nil + } + } else { + if afterVal != nil && *afterVal == val { + // Asked to set a value and it's already set - we're done. 
+ return nil + } + } + + if afterVal == nil { + delete(md, key) + } else { + md[key] = *afterVal + } + + // Attempt to write the new value now + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata( + projectID, + &compute.Metadata{ + Fingerprint: project.CommonInstanceMetadata.Fingerprint, + Items: expandComputeMetadata(md), + }, + ).Do() + + if err != nil { + return err + } + + log.Printf("[DEBUG] SetCommonInstanceMetadata: %d (%s)", op.Id, op.SelfLink) + + return computeOperationWait(config.clientCompute, op, project.Name, "SetCommonInstanceMetadata") + } + + return MetadataRetryWrapper(updateMD) +} diff --git a/provider/terraform/resources/resource_compute_region_backend_service.go b/provider/terraform/resources/resource_compute_region_backend_service.go new file mode 100644 index 000000000000..376b08d8fbb3 --- /dev/null +++ b/provider/terraform/resources/resource_compute_region_backend_service.go @@ -0,0 +1,361 @@ +package google + +import ( + "bytes" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" +) + +func resourceComputeRegionBackendService() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionBackendServiceCreate, + Read: resourceComputeRegionBackendServiceRead, + Update: resourceComputeRegionBackendServiceUpdate, + Delete: resourceComputeRegionBackendServiceDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCPName, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Required: true, + MinItems: 1, + MaxItems: 1, + }, + + "backend": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + 
Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Optional: true, + Set: resourceGoogleComputeRegionBackendServiceBackendHash, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "connection_draining_timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + }, + } +} + +func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := computeBeta.BackendService{ + Name: d.Get("name").(string), + HealthChecks: healthChecks, + LoadBalancingScheme: "INTERNAL", + } + + var err error + if v, ok := d.GetOk("backend"); ok { + service.Backends, err = expandBackends(v.(*schema.Set).List()) + if err != nil { + return err + } + } + + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + + if v, ok := 
d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } + + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + if v, ok := d.GetOk("connection_draining_timeout_sec"); ok { + connectionDraining := &computeBeta.ConnectionDraining{ + DrainingTimeoutSec: int64(v.(int)), + } + + service.ConnectionDraining = connectionDraining + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service) + + op, err := config.clientComputeBeta.RegionBackendServices.Insert( + project, region, &service).Do() + if err != nil { + return fmt.Errorf("Error creating backend service: %s", err) + } + + log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) + + d.SetId(service.Name) + + err = computeSharedOperationWait(config.clientCompute, op, project, "Creating Region Backend Service") + if err != nil { + return err + } + + return resourceComputeRegionBackendServiceRead(d, meta) +} + +func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + service, err := config.clientCompute.RegionBackendServices.Get( + project, region, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Region Backend Service %q", d.Get("name").(string))) + } + + d.Set("description", service.Description) + d.Set("protocol", service.Protocol) + d.Set("session_affinity", service.SessionAffinity) + d.Set("timeout_sec", service.TimeoutSec) + d.Set("connection_draining_timeout_sec", service.ConnectionDraining.DrainingTimeoutSec) + d.Set("fingerprint", service.Fingerprint) + d.Set("self_link", service.SelfLink) + err = d.Set("backend", 
flattenRegionBackends(service.Backends)) + if err != nil { + return err + } + d.Set("health_checks", service.HealthChecks) + d.Set("project", project) + d.Set("region", region) + + return nil +} + +func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := computeBeta.BackendService{ + Name: d.Get("name").(string), + Fingerprint: d.Get("fingerprint").(string), + HealthChecks: healthChecks, + LoadBalancingScheme: "INTERNAL", + } + + // Optional things + if v, ok := d.GetOk("backend"); ok { + service.Backends, err = expandBackends(v.(*schema.Set).List()) + if err != nil { + return err + } + } + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + if v, ok := d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + if d.HasChange("connection_draining_timeout_sec") { + connectionDraining := &computeBeta.ConnectionDraining{ + DrainingTimeoutSec: int64(d.Get("connection_draining_timeout_sec").(int)), + } + + service.ConnectionDraining = connectionDraining + } + + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) + op, err := config.clientComputeBeta.RegionBackendServices.Update( + project, region, d.Id(), &service).Do() + if err != nil { + return fmt.Errorf("Error updating backend service: %s", err) + } + + d.SetId(service.Name) + + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating Backend Service") + 
if err != nil { + return err + } + + return resourceComputeRegionBackendServiceRead(d, meta) +} + +func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting backend service %s", d.Id()) + op, err := config.clientCompute.RegionBackendServices.Delete( + project, region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting backend service: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Deleting Backend Service") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if group, err := getRelativePath(m["group"].(string)); err != nil { + log.Printf("[WARN] Error on retrieving relative path of instance group: %s", err) + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + } else { + buf.WriteString(fmt.Sprintf("%s-", group)) + } + + if v, ok := m["description"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func flattenRegionBackends(backends []*compute.Backend) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(backends)) + + for _, b := range backends { + data := make(map[string]interface{}) + + data["description"] = b.Description + data["group"] = b.Group + result = append(result, data) + } + + return result +} diff --git a/provider/terraform/resources/resource_compute_region_instance_group_manager.go b/provider/terraform/resources/resource_compute_region_instance_group_manager.go new file mode 100644 index 000000000000..648083b362ed --- /dev/null +++ 
b/provider/terraform/resources/resource_compute_region_instance_group_manager.go @@ -0,0 +1,668 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + + computeBeta "google.golang.org/api/compute/v0.beta" +) + +func resourceComputeRegionInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionInstanceGroupManagerCreate, + Read: resourceComputeRegionInstanceGroupManagerRead, + Update: resourceComputeRegionInstanceGroupManagerUpdate, + Delete: resourceComputeRegionInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{ + State: resourceRegionInstanceGroupManagerStateImporter, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(15 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "base_instance_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + + "version": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. 
See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + + "target_size": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + + "percent": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + }, + }, + }, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "named_port": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "update_strategy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + ValidateFunc: validation.StringInSlice([]string{"NONE", "ROLLING_UPDATE"}, false), + }, + + "target_pools": &schema.Schema{ + Type: schema.TypeSet, + Optional: 
true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: selfLinkRelativePathHash, + }, + "target_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Optional: true, + }, + + // If true, the resource will report ready only after no instances are being created. + // This will not block future reads if instances are being recreated, and it respects + // the "createNoRetry" parameter that's available for this resource. + "wait_for_instances": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "auto_healing_policies": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "health_check": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + }, + + "initial_delay_sec": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 3600), + }, + }, + }, + }, + + "distribution_policy_zones": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Computed: true, + Set: hashZoneFromSelfLinkOrResourceName, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + }, + + "rolling_update_policy": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. 
See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimal_action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), + }, + + "max_surge_fixed": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + ConflictsWith: []string{"rolling_update_policy.0.max_surge_percent"}, + }, + + "max_surge_percent": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"rolling_update_policy.0.max_surge_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "max_unavailable_fixed": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + ConflictsWith: []string{"rolling_update_policy.0.max_unavailable_percent"}, + }, + + "max_unavailable_percent": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"rolling_update_policy.0.max_unavailable_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "min_ready_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + }, + }, + }, + }, + }, + } +} + +func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + if _, ok := d.GetOk("rolling_update_policy"); d.Get("update_strategy") == "ROLLING_UPDATE" && !ok { + return fmt.Errorf("[rolling_update_policy] must be set when 'update_strategy' is set to 'ROLLING_UPDATE'") + } + + manager := &computeBeta.InstanceGroupManager{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + BaseInstanceName: 
d.Get("base_instance_name").(string), + InstanceTemplate: d.Get("instance_template").(string), + TargetSize: int64(d.Get("target_size").(int)), + NamedPorts: getNamedPortsBeta(d.Get("named_port").([]interface{})), + TargetPools: convertStringSet(d.Get("target_pools").(*schema.Set)), + AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), + Versions: expandVersions(d.Get("version").([]interface{})), + DistributionPolicy: expandDistributionPolicy(d.Get("distribution_policy_zones").(*schema.Set)), + // Force send TargetSize to allow size of 0. + ForceSendFields: []string{"TargetSize"}, + } + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Insert(project, d.Get("region").(string), manager).Do() + + if err != nil { + return fmt.Errorf("Error creating RegionInstanceGroupManager: %s", err) + } + + d.SetId(manager.Name) + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Creating InstanceGroupManager") + if err != nil { + return err + } + return resourceComputeRegionInstanceGroupManagerRead(d, config) +} + +type getInstanceManagerFunc func(*schema.ResourceData, interface{}) (*computeBeta.InstanceGroupManager, error) + +func getRegionalManager(d *schema.ResourceData, meta interface{}) (*computeBeta.InstanceGroupManager, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + region, err := getRegion(d, config) + if err != nil { + return nil, err + } + + manager, err := config.clientComputeBeta.RegionInstanceGroupManagers.Get(project, region, d.Id()).Do() + if err != nil { + return nil, handleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", d.Get("name").(string))) + } + + return manager, nil +} + +func waitForInstancesRefreshFunc(f getInstanceManagerFunc, d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + m, 
err := f(d, meta) + if err != nil { + log.Printf("[WARNING] Error in fetching manager while waiting for instances to come up: %s\n", err) + return nil, "error", err + } + if done := m.CurrentActions.None; done < m.TargetSize { + return done, "creating", nil + } else { + return done, "created", nil + } + } +} + +func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + manager, err := getRegionalManager(d, meta) + if err != nil || manager == nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Set("base_instance_name", manager.BaseInstanceName) + d.Set("instance_template", manager.InstanceTemplate) + if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { + return err + } + d.Set("name", manager.Name) + d.Set("region", GetResourceNameFromSelfLink(manager.Region)) + d.Set("description", manager.Description) + d.Set("project", project) + d.Set("target_size", manager.TargetSize) + d.Set("target_pools", manager.TargetPools) + d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)) + d.Set("fingerprint", manager.Fingerprint) + d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)) + d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)) + if err := d.Set("distribution_policy_zones", flattenDistributionPolicy(manager.DistributionPolicy)); err != nil { + return err + } + d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) + update_strategy, ok := d.GetOk("update_strategy") + if !ok { + update_strategy = "NONE" + } + d.Set("update_strategy", update_strategy.(string)) + + if d.Get("wait_for_instances").(bool) { + conf := resource.StateChangeConf{ + Pending: []string{"creating", "error"}, + Target: []string{"created"}, + Refresh: waitForInstancesRefreshFunc(getRegionalManager, d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + } + _, err := conf.WaitForState() + if err 
!= nil { + return err + } + } + + return nil +} + +// Updates an instance group manager by applying the update strategy (REPLACE, RESTART) +// and rolling update policy (PROACTIVE, OPPORTUNISTIC). Updates performed by API +// are OPPORTUNISTIC by default. +func performRegionUpdate(config *Config, id string, updateStrategy string, rollingUpdatePolicy *computeBeta.InstanceGroupManagerUpdatePolicy, versions []*computeBeta.InstanceGroupManagerVersion, project string, region string) error { + if updateStrategy == "RESTART" { + managedInstances, err := config.clientComputeBeta.RegionInstanceGroupManagers.ListManagedInstances(project, region, id).Do() + if err != nil { + return fmt.Errorf("Error getting region instance group managers instances: %s", err) + } + + managedInstanceCount := len(managedInstances.ManagedInstances) + instances := make([]string, managedInstanceCount) + for i, v := range managedInstances.ManagedInstances { + instances[i] = v.Instance + } + + recreateInstances := &computeBeta.RegionInstanceGroupManagersRecreateRequest{ + Instances: instances, + } + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.RecreateInstances(project, region, id, recreateInstances).Do() + if err != nil { + return fmt.Errorf("Error restarting region instance group managers instances: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWaitTime(config.clientCompute, op, project, managedInstanceCount*4, "Restarting RegionInstanceGroupManagers instances") + if err != nil { + return err + } + } + + if updateStrategy == "ROLLING_UPDATE" { + // UpdatePolicy is set for InstanceGroupManager on update only, because it is only relevant for `Patch` calls. + // Other tools(gcloud and UI) capable of executing the same `ROLLING UPDATE` call + // expect those values to be provided by user as part of the call + // or provide their own defaults without respecting what was previously set on UpdateManager. 
+ // To follow the same logic, we provide policy values on relevant update change only. + manager := &computeBeta.InstanceGroupManager{ + UpdatePolicy: rollingUpdatePolicy, + Versions: versions, + } + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Patch(project, region, id, manager).Do() + if err != nil { + return fmt.Errorf("Error updating region managed group instances: %s", err) + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating region managed group instances") + if err != nil { + return err + } + } + + return nil +} + +func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + + d.Partial(true) + + if _, ok := d.GetOk("rolling_update_policy"); d.Get("update_strategy") == "ROLLING_UPDATE" && !ok { + return fmt.Errorf("[rolling_update_policy] must be set when 'update_strategy' is set to 'ROLLING_UPDATE'") + } + + if d.HasChange("target_pools") { + targetPools := convertStringSet(d.Get("target_pools").(*schema.Set)) + + // Build the parameter + setTargetPools := &computeBeta.RegionInstanceGroupManagersSetTargetPoolsRequest{ + Fingerprint: d.Get("fingerprint").(string), + TargetPools: targetPools, + } + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.SetTargetPools( + project, region, d.Id(), setTargetPools).Do() + + if err != nil { + return fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating RegionInstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("target_pools") + } + + if d.HasChange("instance_template") { + // Build the parameter + setInstanceTemplate := &computeBeta.RegionInstanceGroupManagersSetTemplateRequest{ + InstanceTemplate: 
d.Get("instance_template").(string), + } + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.SetInstanceTemplate( + project, region, d.Id(), setInstanceTemplate).Do() + + if err != nil { + return fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating InstanceGroupManager") + if err != nil { + return err + } + + updateStrategy := d.Get("update_strategy").(string) + rollingUpdatePolicy := expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{})) + err = performRegionUpdate(config, d.Id(), updateStrategy, rollingUpdatePolicy, nil, project, region) + d.SetPartial("instance_template") + } + + // If version changes then update + if d.HasChange("version") { + updateStrategy := d.Get("update_strategy").(string) + rollingUpdatePolicy := expandUpdatePolicy(d.Get("rolling_update_policy").([]interface{})) + versions := expandVersions(d.Get("version").([]interface{})) + err = performRegionUpdate(config, d.Id(), updateStrategy, rollingUpdatePolicy, versions, project, region) + if err != nil { + return err + } + + d.SetPartial("version") + } + + if d.HasChange("named_port") { + // Build the parameters for a "SetNamedPorts" request: + namedPorts := getNamedPortsBeta(d.Get("named_port").([]interface{})) + setNamedPorts := &computeBeta.RegionInstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + // Make the request: + op, err := config.clientComputeBeta.RegionInstanceGroups.SetNamedPorts( + project, region, d.Id(), setNamedPorts).Do() + + if err != nil { + return fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) + } + + // Wait for the operation to complete: + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating RegionInstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("named_port") + } + + if d.HasChange("target_size") { + targetSize := 
int64(d.Get("target_size").(int)) + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Resize( + project, region, d.Id(), targetSize).Do() + + if err != nil { + return fmt.Errorf("Error resizing RegionInstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Resizing RegionInstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("target_size") + } + + if d.HasChange("auto_healing_policies") { + setAutoHealingPoliciesRequest := &computeBeta.RegionInstanceGroupManagersSetAutoHealingRequest{} + if v, ok := d.GetOk("auto_healing_policies"); ok { + setAutoHealingPoliciesRequest.AutoHealingPolicies = expandAutoHealingPolicies(v.([]interface{})) + } + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.SetAutoHealingPolicies( + project, region, d.Id(), setAutoHealingPoliciesRequest).Do() + + if err != nil { + return fmt.Errorf("Error updating AutoHealingPolicies: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWait(config.clientCompute, op, project, "Updating AutoHealingPolicies") + if err != nil { + return err + } + + d.SetPartial("auto_healing_policies") + } + + d.Partial(false) + + return resourceComputeRegionInstanceGroupManagerRead(d, meta) +} + +func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + + op, err := config.clientComputeBeta.RegionInstanceGroupManagers.Delete(project, region, d.Id()).Do() + + if err != nil { + return fmt.Errorf("Error deleting region instance group manager: %s", err) + } + + // Wait for the operation to complete + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutDelete).Minutes()), "Deleting RegionInstanceGroupManager") + + 
d.SetId("") + return nil +} + +func expandDistributionPolicy(configured *schema.Set) *computeBeta.DistributionPolicy { + if configured.Len() == 0 { + return nil + } + + distributionPolicyZoneConfigs := make([]*computeBeta.DistributionPolicyZoneConfiguration, 0, configured.Len()) + for _, raw := range configured.List() { + data := raw.(string) + distributionPolicyZoneConfig := computeBeta.DistributionPolicyZoneConfiguration{ + Zone: "zones/" + data, + } + + distributionPolicyZoneConfigs = append(distributionPolicyZoneConfigs, &distributionPolicyZoneConfig) + } + return &computeBeta.DistributionPolicy{Zones: distributionPolicyZoneConfigs} +} + +func flattenDistributionPolicy(distributionPolicy *computeBeta.DistributionPolicy) []string { + zones := make([]string, 0) + + if distributionPolicy != nil { + for _, zone := range distributionPolicy.Zones { + zones = append(zones, zone.Zone) + } + } + + return zones +} + +func hashZoneFromSelfLinkOrResourceName(value interface{}) int { + parts := strings.Split(value.(string), "/") + resource := parts[len(parts)-1] + + return hashcode.String(resource) +} + +func resourceRegionInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("wait_for_instances", false) + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_compute_router_interface.go b/provider/terraform/resources/resource_compute_router_interface.go new file mode 100644 index 000000000000..4e6151b6fa29 --- /dev/null +++ b/provider/terraform/resources/resource_compute_router_interface.go @@ -0,0 +1,273 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouterInterface() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterInterfaceCreate, + Read: resourceComputeRouterInterfaceRead, + 
Delete: resourceComputeRouterInterfaceDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterInterfaceImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "router": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpn_tunnel": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, + }, + + "ip_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + ifaces := router.Interfaces + for _, iface := range ifaces { + if iface.Name == ifaceName { + d.SetId("") + return fmt.Errorf("Router %s has interface %s already", routerName, ifaceName) + } + } + + vpnTunnel, err := 
getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string)) + if err != nil { + return err + } + + iface := &compute.RouterInterface{Name: ifaceName, + LinkedVpnTunnel: vpnTunnel} + + if v, ok := d.GetOk("ip_range"); ok { + iface.IpRange = v.(string) + } + + log.Printf("[INFO] Adding interface %s", ifaceName) + ifaces = append(ifaces, iface) + patchRouter := &compute.Router{ + Interfaces: ifaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + err = computeOperationWait(config.clientCompute, op, project, "Patching router") + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + return resourceComputeRouterInterfaceRead(d, meta) +} + +func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + for _, iface := range router.Interfaces { + + if iface.Name == ifaceName { + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + d.Set("vpn_tunnel", 
iface.LinkedVpnTunnel) + d.Set("ip_range", iface.IpRange) + d.Set("region", region) + d.Set("project", project) + return nil + } + } + + log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) + d.SetId("") + return nil +} + +func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var ifaceFound bool + + newIfaces := make([]*compute.RouterInterface, 0, len(router.Interfaces)) + for _, iface := range router.Interfaces { + + if iface.Name == ifaceName { + ifaceFound = true + continue + } else { + newIfaces = append(newIfaces, iface) + } + } + + if !ifaceFound { + log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName) + d.SetId("") + return nil + } + + log.Printf( + "[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName) + patchRouter := &compute.Router{ + Interfaces: newIfaces, + } + + if len(newIfaces) == 0 { + patchRouter.ForceSendFields = append(patchRouter.ForceSendFields, "Interfaces") + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) + op, err := routersService.Patch(project, 
region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") + return nil +} + +func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid router interface specifier. Expecting {region}/{router}/{interface}") + } + + d.Set("region", parts[0]) + d.Set("router", parts[1]) + d.Set("name", parts[2]) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_compute_router_peer.go b/provider/terraform/resources/resource_compute_router_peer.go new file mode 100644 index 000000000000..c32127090de6 --- /dev/null +++ b/provider/terraform/resources/resource_compute_router_peer.go @@ -0,0 +1,294 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouterPeer() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterPeerCreate, + Read: resourceComputeRouterPeerRead, + Delete: resourceComputeRouterPeerDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterPeerImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "router": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "interface": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "peer_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, 
+ + "peer_asn": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "advertised_route_priority": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + peers := router.BgpPeers + for _, peer := range peers { + if peer.Name == peerName { + d.SetId("") + return fmt.Errorf("Router %s has peer %s already", routerName, peerName) + } + } + + ifaceName := d.Get("interface").(string) + + peer := &compute.RouterBgpPeer{Name: peerName, + InterfaceName: ifaceName} + + if v, ok := d.GetOk("peer_ip_address"); ok { + peer.PeerIpAddress = v.(string) + } + + if v, ok := d.GetOk("peer_asn"); ok { + peer.PeerAsn = int64(v.(int)) + } + + if v, ok := d.GetOk("advertised_route_priority"); ok { + 
peer.AdvertisedRoutePriority = int64(v.(int)) + } + + log.Printf("[INFO] Adding peer %s", peerName) + peers = append(peers, peer) + patchRouter := &compute.Router{ + BgpPeers: peers, + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + err = computeOperationWait(config.clientCompute, op, project, "Patching router") + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + return resourceComputeRouterPeerRead(d, meta) +} + +func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + for _, peer := range router.BgpPeers { + + if peer.Name == peerName { + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + d.Set("interface", peer.InterfaceName) + d.Set("peer_ip_address", peer.PeerIpAddress) + d.Set("peer_asn", peer.PeerAsn) + d.Set("advertised_route_priority", peer.AdvertisedRoutePriority) + d.Set("ip_address", peer.IpAddress) + d.Set("region", region) + d.Set("project", project) + return 
nil + } + } + + log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName) + d.SetId("") + return nil +} + +func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var newPeers []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, 0, len(router.BgpPeers)) + for _, peer := range router.BgpPeers { + if peer.Name == peerName { + continue + } else { + newPeers = append(newPeers, peer) + } + } + + if len(newPeers) == len(router.BgpPeers) { + log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName) + d.SetId("") + return nil + } + + log.Printf( + "[INFO] Removing peer %s from router %s/%s", peerName, region, routerName) + patchRouter := &compute.Router{ + BgpPeers: newPeers, + } + + if len(newPeers) == 0 { + patchRouter.ForceSendFields = append(patchRouter.ForceSendFields, "BgpPeers") + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newPeers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = 
computeOperationWait(config.clientCompute, op, project, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") + return nil +} + +func resourceComputeRouterPeerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid router peer specifier. Expecting {region}/{router}/{peer}") + } + + d.Set("region", parts[0]) + d.Set("router", parts[1]) + d.Set("name", parts[2]) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_compute_security_policy.go b/provider/terraform/resources/resource_compute_security_policy.go new file mode 100644 index 000000000000..ff7318df1254 --- /dev/null +++ b/provider/terraform/resources/resource_compute_security_policy.go @@ -0,0 +1,365 @@ +package google + +import ( + "fmt" + "log" + + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/compute/v0.beta" +) + +func resourceComputeSecurityPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSecurityPolicyCreate, + Read: resourceComputeSecurityPolicyRead, + Update: resourceComputeSecurityPolicyUpdate, + Delete: resourceComputeSecurityPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCPName, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "project": &schema.Schema{ + 
Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "rule": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, // If no rules are set, a default rule is added + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"allow", "deny(403)", "deny(404)", "deny(502)"}, false), + }, + + "priority": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "match": &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config": &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "src_ip_ranges": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 5, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "versioned_expr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"SRC_IPS_V1"}, false), + }, + }, + }, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "preview": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + sp := d.Get("name").(string) + securityPolicy := &compute.SecurityPolicy{ + Name: sp, + Description: d.Get("description").(string), + } + if v, ok := d.GetOk("rule"); ok { + securityPolicy.Rules = 
expandSecurityPolicyRules(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] SecurityPolicy insert request: %#v", securityPolicy) + + op, err := config.clientComputeBeta.SecurityPolicies.Insert(project, securityPolicy).Do() + + if err != nil { + return errwrap.Wrapf("Error creating SecurityPolicy: {{err}}", err) + } + + d.SetId(securityPolicy.Name) + + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Creating SecurityPolicy %q", sp)) + if err != nil { + return err + } + + return resourceComputeSecurityPolicyRead(d, meta) +} + +func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + securityPolicy, err := config.clientComputeBeta.SecurityPolicies.Get(project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("SecurityPolicy %q", d.Id())) + } + + d.Set("name", securityPolicy.Name) + d.Set("description", securityPolicy.Description) + if err := d.Set("rule", flattenSecurityPolicyRules(securityPolicy.Rules)); err != nil { + return err + } + d.Set("fingerprint", securityPolicy.Fingerprint) + d.Set("project", project) + d.Set("self_link", ConvertSelfLinkToV1(securityPolicy.SelfLink)) + + return nil +} + +func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + sp := d.Id() + + if d.HasChange("description") { + securityPolicy := &compute.SecurityPolicy{ + Description: d.Get("description").(string), + Fingerprint: d.Get("fingerprint").(string), + ForceSendFields: []string{"Description"}, + } + op, err := config.clientComputeBeta.SecurityPolicies.Patch(project, sp, securityPolicy).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", 
sp), err) + } + + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Updating SecurityPolicy %q", sp)) + if err != nil { + return err + } + } + + if d.HasChange("rule") { + o, n := d.GetChange("rule") + oSet := o.(*schema.Set) + nSet := n.(*schema.Set) + + oPriorities := map[int64]bool{} + nPriorities := map[int64]bool{} + for _, rule := range oSet.List() { + oPriorities[int64(rule.(map[string]interface{})["priority"].(int))] = true + } + + for _, rule := range nSet.List() { + priority := int64(rule.(map[string]interface{})["priority"].(int)) + nPriorities[priority] = true + if !oPriorities[priority] { + // If the rule is in new and its priority does not exist in old, then add it. + op, err := config.clientComputeBeta.SecurityPolicies.AddRule(project, sp, expandSecurityPolicyRule(rule)).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) + } + + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Updating SecurityPolicy %q", sp)) + if err != nil { + return err + } + } else if !oSet.Contains(rule) { + // If the rule is in new, and its priority is in old, but its hash is different than the one in old, update it. 
+ op, err := config.clientComputeBeta.SecurityPolicies.PatchRule(project, sp, expandSecurityPolicyRule(rule)).Priority(priority).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) + } + + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Updating SecurityPolicy %q", sp)) + if err != nil { + return err + } + } + } + + for _, rule := range oSet.List() { + priority := int64(rule.(map[string]interface{})["priority"].(int)) + if !nPriorities[priority] { + // If the rule's priority is in old but not new, remove it. + op, err := config.clientComputeBeta.SecurityPolicies.RemoveRule(project, sp).Priority(priority).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) + } + + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutCreate).Minutes()), fmt.Sprintf("Updating SecurityPolicy %q", sp)) + if err != nil { + return err + } + } + } + } + + return resourceComputeSecurityPolicyRead(d, meta) +} + +func resourceComputeSecurityPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the SecurityPolicy + op, err := config.clientComputeBeta.SecurityPolicies.Delete(project, d.Id()).Do() + if err != nil { + return errwrap.Wrapf("Error deleting SecurityPolicy: {{err}}", err) + } + + err = computeSharedOperationWaitTime(config.clientCompute, op, project, int(d.Timeout(schema.TimeoutDelete).Minutes()), "Deleting SecurityPolicy") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func expandSecurityPolicyRules(configured []interface{}) []*compute.SecurityPolicyRule { + rules := make([]*compute.SecurityPolicyRule, 0, len(configured)) + for _, raw := range configured { + rules = append(rules, 
expandSecurityPolicyRule(raw)) + } + return rules +} + +func expandSecurityPolicyRule(raw interface{}) *compute.SecurityPolicyRule { + data := raw.(map[string]interface{}) + return &compute.SecurityPolicyRule{ + Description: data["description"].(string), + Priority: int64(data["priority"].(int)), + Action: data["action"].(string), + Preview: data["preview"].(bool), + Match: expandSecurityPolicyMatch(data["match"].([]interface{})), + ForceSendFields: []string{"Description", "Preview"}, + } +} + +func expandSecurityPolicyMatch(configured []interface{}) *compute.SecurityPolicyRuleMatcher { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyRuleMatcher{ + VersionedExpr: data["versioned_expr"].(string), + Config: expandSecurityPolicyMatchConfig(data["config"].([]interface{})), + } +} + +func expandSecurityPolicyMatchConfig(configured []interface{}) *compute.SecurityPolicyRuleMatcherConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyRuleMatcherConfig{ + SrcIpRanges: convertStringArr(data["src_ip_ranges"].(*schema.Set).List()), + } +} + +func flattenSecurityPolicyRules(rules []*compute.SecurityPolicyRule) []map[string]interface{} { + rulesSchema := make([]map[string]interface{}, 0, len(rules)) + for _, rule := range rules { + data := map[string]interface{}{ + "description": rule.Description, + "priority": rule.Priority, + "action": rule.Action, + "preview": rule.Preview, + "match": []map[string]interface{}{ + { + "versioned_expr": rule.Match.VersionedExpr, + "config": []map[string]interface{}{ + { + "src_ip_ranges": schema.NewSet(schema.HashString, convertStringArrToInterface(rule.Match.Config.SrcIpRanges)), + }, + }, + }, + }, + } + + rulesSchema = append(rulesSchema, data) + } + return rulesSchema +} diff --git 
a/provider/terraform/resources/resource_compute_shared_vpc_host_project.go b/provider/terraform/resources/resource_compute_shared_vpc_host_project.go new file mode 100644 index 000000000000..07f00857dad0 --- /dev/null +++ b/provider/terraform/resources/resource_compute_shared_vpc_host_project.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeSharedVpcHostProject() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSharedVpcHostProjectCreate, + Read: resourceComputeSharedVpcHostProjectRead, + Delete: resourceComputeSharedVpcHostProjectDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeSharedVpcHostProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hostProject := d.Get("project").(string) + op, err := config.clientCompute.Projects.EnableXpnHost(hostProject).Do() + if err != nil { + return fmt.Errorf("Error enabling Shared VPC Host %q: %s", hostProject, err) + } + + d.SetId(hostProject) + + err = computeOperationWait(config.clientCompute, op, hostProject, "Enabling Shared VPC Host") + if err != nil { + d.SetId("") + return err + } + + return nil +} + +func resourceComputeSharedVpcHostProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hostProject := d.Id() + + project, err := config.clientCompute.Projects.Get(hostProject).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Project data for project %q", hostProject)) + } + + if project.XpnProjectStatus != "HOST" { + log.Printf("[WARN] Removing Shared VPC host resource %q because it's not enabled server-side", hostProject) + d.SetId("") + } + + d.Set("project", hostProject) + + return nil +} + +func 
resourceComputeSharedVpcHostProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + hostProject := d.Get("project").(string) + + op, err := config.clientCompute.Projects.DisableXpnHost(hostProject).Do() + if err != nil { + return fmt.Errorf("Error disabling Shared VPC Host %q: %s", hostProject, err) + } + + err = computeOperationWait(config.clientCompute, op, hostProject, "Disabling Shared VPC Host") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/provider/terraform/resources/resource_compute_shared_vpc_service_project.go b/provider/terraform/resources/resource_compute_shared_vpc_service_project.go new file mode 100644 index 000000000000..8ddba80e8e50 --- /dev/null +++ b/provider/terraform/resources/resource_compute_shared_vpc_service_project.go @@ -0,0 +1,133 @@ +package google + +import ( + "fmt" + "strings" + + "google.golang.org/api/compute/v1" + + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" +) + +func resourceComputeSharedVpcServiceProject() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSharedVpcServiceProjectCreate, + Read: resourceComputeSharedVpcServiceProjectRead, + Delete: resourceComputeSharedVpcServiceProjectDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "host_project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "service_project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeSharedVpcServiceProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hostProject := d.Get("host_project").(string) + serviceProject := d.Get("service_project").(string) + + req := &compute.ProjectsEnableXpnResourceRequest{ + XpnResource: &compute.XpnResourceId{ + Id: serviceProject, + Type: "PROJECT", + 
}, + } + op, err := config.clientCompute.Projects.EnableXpnResource(hostProject, req).Do() + if err != nil { + return err + } + if err = computeOperationWait(config.clientCompute, op, hostProject, "Enabling Shared VPC Resource"); err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", hostProject, serviceProject)) + + return nil +} + +func resourceComputeSharedVpcServiceProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + split := strings.Split(d.Id(), "/") + if len(split) != 2 { + return fmt.Errorf("Error parsing resource ID %s", d.Id()) + } + hostProject := split[0] + serviceProject := split[1] + + associatedHostProject, err := config.clientCompute.Projects.GetXpnHost(serviceProject).Do() + if err != nil { + log.Printf("[WARN] Removing shared VPC service. The service project is not associated with any host") + + d.SetId("") + return nil + } + + if hostProject != associatedHostProject.Name { + log.Printf("[WARN] Removing shared VPC service. Expected associated host project to be '%s', got '%s'", hostProject, associatedHostProject.Name) + d.SetId("") + return nil + } + + d.Set("host_project", hostProject) + d.Set("service_project", serviceProject) + + return nil +} + +func resourceComputeSharedVpcServiceProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + hostProject := d.Get("host_project").(string) + serviceProject := d.Get("service_project").(string) + + if err := disableXpnResource(config, hostProject, serviceProject); err != nil { + // Don't fail if the service project is already disabled. 
+ if !isDisabledXpnResourceError(err) { + return fmt.Errorf("Error disabling Shared VPC Resource %q: %s", serviceProject, err) + } + } + + return nil +} + +func disableXpnResource(config *Config, hostProject, project string) error { + req := &compute.ProjectsDisableXpnResourceRequest{ + XpnResource: &compute.XpnResourceId{ + Id: project, + Type: "PROJECT", + }, + } + op, err := config.clientCompute.Projects.DisableXpnResource(hostProject, req).Do() + if err != nil { + return err + } + if err = computeOperationWait(config.clientCompute, op, hostProject, "Disabling Shared VPC Resource"); err != nil { + return err + } + return nil +} + +func isDisabledXpnResourceError(err error) bool { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "invalidResourceUsage" { + return true + } + } + return false +} diff --git a/provider/terraform/resources/resource_compute_snapshot.go b/provider/terraform/resources/resource_compute_snapshot.go new file mode 100644 index 000000000000..cd38ba8262ba --- /dev/null +++ b/provider/terraform/resources/resource_compute_snapshot.go @@ -0,0 +1,266 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSnapshotCreate, + Read: resourceComputeSnapshotRead, + Delete: resourceComputeSnapshotDelete, + Update: resourceComputeSnapshotUpdate, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "snapshot_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + 
"snapshot_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "source_disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "source_disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "source_disk": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "source_disk_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "label_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + } +} + +func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the snapshot parameter + snapshot := &compute.Snapshot{ + Name: d.Get("name").(string), + } + + source_disk := d.Get("source_disk").(string) + + if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok { + snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{} + snapshot.SnapshotEncryptionKey.RawKey = v.(string) + } + + if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok { + snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{} + snapshot.SourceDiskEncryptionKey.RawKey = v.(string) + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + + op, err := 
config.clientCompute.Disks.CreateSnapshot( + project, zone, source_disk, snapshot).Do() + if err != nil { + return fmt.Errorf("Error creating snapshot: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(snapshot.Name) + + timeout := int(d.Timeout(schema.TimeoutCreate).Minutes()) + err = computeOperationWaitTime(config.clientCompute, op, project, "Creating Snapshot", timeout) + if err != nil { + return err + } + + // Now if labels are set, go ahead and apply them + if labels := expandLabels(d); len(labels) > 0 { + // First, read the remote resource in order to find the fingerprint + apiSnapshot, err := config.clientCompute.Snapshots.Get(project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Eror when reading snapshot for label update: %s", err) + } + + err = updateLabels(config.clientCompute, project, d.Id(), labels, apiSnapshot.LabelFingerprint, timeout) + if err != nil { + return err + } + } + return resourceComputeSnapshotRead(d, meta) +} + +func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + + snapshot, err := config.clientCompute.Snapshots.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string))) + } + + d.Set("self_link", snapshot.SelfLink) + d.Set("source_disk_link", snapshot.SourceDisk) + d.Set("name", snapshot.Name) + + if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { + d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) + } + + if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" { + d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) + } + + d.Set("labels", snapshot.Labels) + d.Set("label_fingerprint", 
snapshot.LabelFingerprint) + d.Set("project", project) + d.Set("zone", zone) + + return nil +} + +func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + if d.HasChange("labels") { + err = updateLabels(config.clientCompute, project, d.Id(), expandLabels(d), d.Get("label_fingerprint").(string), int(d.Timeout(schema.TimeoutDelete).Minutes())) + if err != nil { + return err + } + + d.SetPartial("labels") + } + + d.Partial(false) + + return resourceComputeSnapshotRead(d, meta) +} + +func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the snapshot + op, err := config.clientCompute.Snapshots.Delete( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + return fmt.Errorf("Error deleting snapshot: %s", err) + } + + err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting Snapshot", int(d.Timeout(schema.TimeoutDelete).Minutes())) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func updateLabels(client *compute.Service, project string, resourceId string, labels map[string]string, labelFingerprint string, timeout int) error { + setLabelsReq := compute.GlobalSetLabelsRequest{ + Labels: labels, + LabelFingerprint: labelFingerprint, + } + op, err := client.Snapshots.SetLabels(project, resourceId, &setLabelsReq).Do() + if err != nil { + return err + } + + return computeOperationWaitTime(client, op, project, "Setting labels on snapshot", timeout) +} diff --git a/provider/terraform/resources/resource_compute_target_pool.go 
b/provider/terraform/resources/resource_compute_target_pool.go new file mode 100644 index 000000000000..33ef305c92ad --- /dev/null +++ b/provider/terraform/resources/resource_compute_target_pool.go @@ -0,0 +1,446 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +var instancesSelfLinkPattern = regexp.MustCompile(fmt.Sprintf(zonalLinkBasePattern, "instances")) + +func resourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "backup_pool": { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "failover_ratio": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + }, + + "health_checks": { + Type: schema.TypeList, + Optional: true, + ForceNew: false, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + }, + + "instances": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // instances are stored in state as "zone/name" + oldParts := strings.Split(old, "/") + + // instances can also be specified in the config as a URL + if parts := instancesSelfLinkPattern.FindStringSubmatch(new); len(oldParts) == 2 && len(parts) == 4 { + // parts[0] = full match + // parts[1] = project + // parts[2] = zone + // 
parts[3] = instance name + + oZone, oName := oldParts[0], oldParts[1] + nZone, nName := parts[2], parts[3] + if oZone == nZone && oName == nName { + return true + } + } + return false + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "NONE", + }, + }, + } +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(healthChecks []interface{}, d *schema.ResourceData, config *Config) ([]string, error) { + if healthChecks == nil || len(healthChecks) == 0 { + return []string{}, nil + } + + hc, err := ParseHttpHealthCheckFieldValue(healthChecks[0].(string), d, config) + if err != nil { + return nil, err + } + + return []string{hc.RelativeLink()}, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. 
+// Instances can be full URLS or zone/name +func convertInstancesToUrls(config *Config, project string, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { + urls[i] = name + } else { + splitName := strings.Split(name, "/") + if len(splitName) != 2 { + return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) + } else { + urls[i] = fmt.Sprintf( + "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", + project, splitName[0], splitName[1]) + } + } + } + return urls, nil +} + +func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + hchkUrls, err := convertHealthChecks(d.Get("health_checks").([]interface{}), d, config) + if err != nil { + return err + } + + instanceUrls, err := convertInstancesToUrls( + config, project, convertStringArr(d.Get("instances").([]interface{}))) + if err != nil { + return err + } + + // Build the parameter + tpool := &compute.TargetPool{ + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), + SessionAffinity: d.Get("session_affinity").(string), + } + if d.Get("failover_ratio") != nil { + tpool.FailoverRatio = d.Get("failover_ratio").(float64) + } + log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) + op, err := config.clientCompute.TargetPools.Insert( + project, region, tpool).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 && strings.Contains(gerr.Message, "httpHealthChecks") { + return fmt.Errorf("Health check %s is not a valid HTTP health check", d.Get("health_checks").([]interface{})[0]) + } 
// calcAddRemove computes the two-way difference between two string slices.
// It returns (add, remove): "add" holds the elements of "to" that are absent
// from "from" (in "to" order), and "remove" holds the elements of "from" that
// are absent from "to" (in "from" order). Duplicate occurrences in the input
// are preserved in the output, matching the previous behavior.
//
// Membership is tracked with sets, replacing the previous accidental O(n*m)
// nested scans with O(n+m).
func calcAddRemove(from []string, to []string) ([]string, []string) {
	fromSet := make(map[string]struct{}, len(from))
	for _, v := range from {
		fromSet[v] = struct{}{}
	}
	toSet := make(map[string]struct{}, len(to))
	for _, v := range to {
		toSet[v] = struct{}{}
	}

	add := make([]string, 0)
	remove := make([]string, 0)
	for _, u := range to {
		if _, ok := fromSet[u]; !ok {
			add = append(add, u)
		}
	}
	for _, u := range from {
		if _, ok := toSet[u]; !ok {
			remove = append(remove, u)
		}
	}
	return add, remove
}
&compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.clientCompute.TargetPools.AddHealthCheck( + project, region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Updating Target Pool") + if err != nil { + return err + } + d.SetPartial("health_checks") + } + + if d.HasChange("instances") { + + from_, to_ := d.GetChange("instances") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertInstancesToUrls(config, project, from) + if err != nil { + return err + } + toUrls, err := convertInstancesToUrls(config, project, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(add)), + } + for i, v := range add { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.clientCompute.TargetPools.AddInstance( + project, region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Updating Target Pool") + if err != nil { + return err + } + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(remove)), + } + for i, v := range remove { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.clientCompute.TargetPools.RemoveInstance( + project, region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + err = computeOperationWait(config.clientCompute, op, project, "Updating Target Pool") + if err != 
nil { + return err + } + d.SetPartial("instances") + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + Target: bpool_name, + } + op, err := config.clientCompute.TargetPools.SetBackup( + project, region, d.Id(), tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Updating Target Pool") + if err != nil { + return err + } + d.SetPartial("backup_pool") + } + + d.Partial(false) + + return resourceComputeTargetPoolRead(d, meta) +} + +func convertInstancesFromUrls(urls []string) []string { + result := make([]string, 0, len(urls)) + for _, url := range urls { + urlArray := strings.Split(url, "/") + instance := fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1]) + result = append(result, instance) + } + return result +} + +func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + tpool, err := config.clientCompute.TargetPools.Get( + project, region, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) + } + + d.Set("self_link", tpool.SelfLink) + d.Set("backup_pool", tpool.BackupPool) + d.Set("description", tpool.Description) + d.Set("failover_ratio", tpool.FailoverRatio) + d.Set("health_checks", tpool.HealthChecks) + if tpool.Instances != nil { + d.Set("instances", convertInstancesFromUrls(tpool.Instances)) + } else { + d.Set("instances", nil) + } + d.Set("name", tpool.Name) + d.Set("region", GetResourceNameFromSelfLink(tpool.Region)) + d.Set("session_affinity", tpool.SessionAffinity) + d.Set("project", project) + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the TargetPool + op, err := config.clientCompute.TargetPools.Delete( + project, region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Deleting Target Pool") + if err != nil { + return err + } + d.SetId("") + return nil +} diff --git a/provider/terraform/resources/resource_compute_url_map.go b/provider/terraform/resources/resource_compute_url_map.go new file mode 100644 index 000000000000..b807cb7f868a --- /dev/null +++ b/provider/terraform/resources/resource_compute_url_map.go @@ -0,0 +1,689 @@ +package google + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeUrlMap() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeUrlMapCreate, + Read: resourceComputeUrlMapRead, + Update: resourceComputeUrlMapUpdate, + Delete: resourceComputeUrlMapDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeUrlMapImportState, + }, + + Schema: map[string]*schema.Schema{ + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "host_rule": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + // TODO(evandbrown): Enable when lists support validation + //ValidateFunc: validateHostRules, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + 
"hosts": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "path_matcher": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "map_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "path_matcher": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "path_rule": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "paths": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "test": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func createHostRule(v interface{}) *compute.HostRule { + _hostRule := v.(map[string]interface{}) + + _hosts := _hostRule["hosts"].([]interface{}) + hosts := make([]string, len(_hosts)) + + for i, v := range _hosts { + hosts[i] = v.(string) + } + 
+ pathMatcher := _hostRule["path_matcher"].(string) + + hostRule := &compute.HostRule{ + Hosts: hosts, + PathMatcher: pathMatcher, + } + + if v, ok := _hostRule["description"]; ok { + hostRule.Description = v.(string) + } + + return hostRule +} + +func createPathMatcher(v interface{}) *compute.PathMatcher { + _pathMatcher := v.(map[string]interface{}) + + _pathRules := _pathMatcher["path_rule"].([]interface{}) + pathRules := make([]*compute.PathRule, len(_pathRules)) + + for ip, vp := range _pathRules { + _pathRule := vp.(map[string]interface{}) + + _paths := _pathRule["paths"].([]interface{}) + paths := make([]string, len(_paths)) + + for ipp, vpp := range _paths { + paths[ipp] = vpp.(string) + } + + service := _pathRule["service"].(string) + + pathRule := &compute.PathRule{ + Paths: paths, + Service: service, + } + + pathRules[ip] = pathRule + } + + name := _pathMatcher["name"].(string) + defaultService := _pathMatcher["default_service"].(string) + + pathMatcher := &compute.PathMatcher{ + PathRules: pathRules, + Name: name, + DefaultService: defaultService, + } + + if vp, okp := _pathMatcher["description"]; okp { + pathMatcher.Description = vp.(string) + } + + return pathMatcher +} + +func createUrlMapTest(v interface{}) *compute.UrlMapTest { + _test := v.(map[string]interface{}) + + host := _test["host"].(string) + path := _test["path"].(string) + service := _test["service"].(string) + + test := &compute.UrlMapTest{ + Host: host, + Path: path, + Service: service, + } + + if vp, okp := _test["description"]; okp { + test.Description = vp.(string) + } + + return test +} + +func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + defaultService := d.Get("default_service").(string) + + urlMap := &compute.UrlMap{ + Name: name, + DefaultService: defaultService, + } + + if v, ok := d.GetOk("description"); 
ok { + urlMap.Description = v.(string) + } + + _hostRules := d.Get("host_rule").(*schema.Set) + urlMap.HostRules = make([]*compute.HostRule, _hostRules.Len()) + + for i, v := range _hostRules.List() { + urlMap.HostRules[i] = createHostRule(v) + } + + _pathMatchers := d.Get("path_matcher").([]interface{}) + urlMap.PathMatchers = make([]*compute.PathMatcher, len(_pathMatchers)) + + for i, v := range _pathMatchers { + urlMap.PathMatchers[i] = createPathMatcher(v) + } + + _tests := make([]interface{}, 0) + if v, ok := d.GetOk("test"); ok { + _tests = v.([]interface{}) + } + urlMap.Tests = make([]*compute.UrlMapTest, len(_tests)) + + for i, v := range _tests { + urlMap.Tests[i] = createUrlMapTest(v) + } + + op, err := config.clientCompute.UrlMaps.Insert(project, urlMap).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert Url Map %s: %s", name, err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Insert Url Map") + + if err != nil { + return fmt.Errorf("Error, failed waitng to insert Url Map %s: %s", name, err) + } + + return resourceComputeUrlMapRead(d, meta) +} + +func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("URL Map %q", d.Get("name").(string))) + } + + d.SetId(name) + d.Set("project", project) + d.Set("self_link", urlMap.SelfLink) + d.Set("map_id", strconv.FormatUint(urlMap.Id, 10)) + d.Set("fingerprint", urlMap.Fingerprint) + d.Set("default_service", urlMap.DefaultService) + + hostRuleMap := make(map[string]*compute.HostRule) + for _, v := range urlMap.HostRules { + hostRuleMap[v.PathMatcher] = v + } + + /* Only read host rules into our TF state that we have defined */ + _hostRules := 
d.Get("host_rule").(*schema.Set).List() + _newHostRules := make([]interface{}, 0) + for _, v := range _hostRules { + _hostRule := v.(map[string]interface{}) + _pathMatcher := _hostRule["path_matcher"].(string) + + /* Delete local entries that are no longer found on the GCE server */ + if hostRule, ok := hostRuleMap[_pathMatcher]; ok { + _newHostRule := make(map[string]interface{}) + _newHostRule["path_matcher"] = _pathMatcher + + hostsSet := make(map[string]bool) + for _, host := range hostRule.Hosts { + hostsSet[host] = true + } + + /* Only store hosts we are keeping track of */ + _newHosts := make([]interface{}, 0) + for _, vp := range _hostRule["hosts"].([]interface{}) { + if _, okp := hostsSet[vp.(string)]; okp { + _newHosts = append(_newHosts, vp) + } + } + + _newHostRule["hosts"] = _newHosts + _newHostRule["description"] = hostRule.Description + + _newHostRules = append(_newHostRules, _newHostRule) + } + } + + d.Set("host_rule", _newHostRules) + + pathMatcherMap := make(map[string]*compute.PathMatcher) + for _, v := range urlMap.PathMatchers { + pathMatcherMap[v.Name] = v + } + + /* Only read path matchers into our TF state that we have defined */ + _pathMatchers := d.Get("path_matcher").([]interface{}) + _newPathMatchers := make([]interface{}, 0) + for _, v := range _pathMatchers { + _pathMatcher := v.(map[string]interface{}) + _name := _pathMatcher["name"].(string) + + if pathMatcher, ok := pathMatcherMap[_name]; ok { + _newPathMatcher := make(map[string]interface{}) + _newPathMatcher["name"] = _name + _newPathMatcher["default_service"] = pathMatcher.DefaultService + _newPathMatcher["description"] = pathMatcher.Description + + _newPathRules := make([]interface{}, len(pathMatcher.PathRules)) + for ip, pathRule := range pathMatcher.PathRules { + _newPathRule := make(map[string]interface{}) + _newPathRule["service"] = pathRule.Service + _paths := make([]interface{}, len(pathRule.Paths)) + + for ipp, vpp := range pathRule.Paths { + _paths[ipp] = vpp + } + + 
_newPathRule["paths"] = _paths + + _newPathRules[ip] = _newPathRule + } + + _newPathMatcher["path_rule"] = _newPathRules + _newPathMatchers = append(_newPathMatchers, _newPathMatcher) + } + } + + d.Set("path_matcher", _newPathMatchers) + + testMap := make(map[string]*compute.UrlMapTest) + for _, v := range urlMap.Tests { + testMap[fmt.Sprintf("%s/%s", v.Host, v.Path)] = v + } + + _tests := make([]interface{}, 0) + /* Only read tests into our TF state that we have defined */ + if v, ok := d.GetOk("test"); ok { + _tests = v.([]interface{}) + } + _newTests := make([]interface{}, 0) + for _, v := range _tests { + _test := v.(map[string]interface{}) + _host := _test["host"].(string) + _path := _test["path"].(string) + + /* Delete local entries that are no longer found on the GCE server */ + if test, ok := testMap[fmt.Sprintf("%s/%s", _host, _path)]; ok { + _newTest := make(map[string]interface{}) + _newTest["host"] = _host + _newTest["path"] = _path + _newTest["description"] = test.Description + _newTest["service"] = test.Service + + _newTests = append(_newTests, _newTest) + } + } + + d.Set("test", _newTests) + + return nil +} + +func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() + if err != nil { + return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) + } + + urlMap.DefaultService = d.Get("default_service").(string) + + if v, ok := d.GetOk("description"); ok { + urlMap.Description = v.(string) + } + + if d.HasChange("host_rule") { + _oldHostRules, _newHostRules := d.GetChange("host_rule") + _oldHostRulesMap := make(map[string]interface{}) + _newHostRulesMap := make(map[string]interface{}) + + for _, v := range _oldHostRules.(*schema.Set).List() { + _hostRule := v.(map[string]interface{}) + 
_oldHostRulesMap[_hostRule["path_matcher"].(string)] = v + } + + for _, v := range _newHostRules.(*schema.Set).List() { + _hostRule := v.(map[string]interface{}) + _newHostRulesMap[_hostRule["path_matcher"].(string)] = v + } + + newHostRules := make([]*compute.HostRule, 0) + /* Decide which host rules to keep */ + for _, v := range urlMap.HostRules { + /* If it's in the old state, we have ownership over the host rule */ + if vOld, ok := _oldHostRulesMap[v.PathMatcher]; ok { + if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok { + /* Adjust for any changes made to this rule */ + _newHostRule := vNew.(map[string]interface{}) + _oldHostRule := vOld.(map[string]interface{}) + _newHostsSet := make(map[string]bool) + _oldHostsSet := make(map[string]bool) + + hostRule := &compute.HostRule{ + PathMatcher: v.PathMatcher, + } + + for _, v := range _newHostRule["hosts"].([]interface{}) { + _newHostsSet[v.(string)] = true + } + + for _, v := range _oldHostRule["hosts"].([]interface{}) { + _oldHostsSet[v.(string)] = true + } + + /* Only add hosts that have been added locally or are new, + * not touching those from the GCE server state */ + for _, host := range v.Hosts { + _, okNew := _newHostsSet[host] + _, okOld := _oldHostsSet[host] + + /* Drop deleted hosts */ + if okOld && !okNew { + continue + } + + hostRule.Hosts = append(hostRule.Hosts, host) + + /* Kep track of the fact that this host was added */ + delete(_newHostsSet, host) + } + + /* Now add in the brand new entries */ + for host, _ := range _newHostsSet { + hostRule.Hosts = append(hostRule.Hosts, host) + } + + if v, ok := _newHostRule["description"]; ok { + hostRule.Description = v.(string) + } + + newHostRules = append(newHostRules, hostRule) + + /* Record that we've include this host rule */ + delete(_newHostRulesMap, v.PathMatcher) + } else { + /* It's been deleted */ + continue + } + } else { + if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok { + newHostRules = append(newHostRules, createHostRule(vNew)) + + /* 
Record that we've include this host rule */ + delete(_newHostRulesMap, v.PathMatcher) + } else { + /* It wasn't created or modified locally */ + newHostRules = append(newHostRules, v) + } + } + } + + /* Record brand new host rules (ones not deleted above) */ + for _, v := range _newHostRulesMap { + newHostRules = append(newHostRules, createHostRule(v)) + } + + urlMap.HostRules = newHostRules + } + + if d.HasChange("path_matcher") { + _oldPathMatchers, _newPathMatchers := d.GetChange("path_matcher") + _oldPathMatchersMap := make(map[string]interface{}) + _newPathMatchersMap := make(map[string]interface{}) + + for _, v := range _oldPathMatchers.([]interface{}) { + _pathMatcher := v.(map[string]interface{}) + _oldPathMatchersMap[_pathMatcher["name"].(string)] = v + } + + for _, v := range _newPathMatchers.([]interface{}) { + _pathMatcher := v.(map[string]interface{}) + _newPathMatchersMap[_pathMatcher["name"].(string)] = v + } + + newPathMatchers := make([]*compute.PathMatcher, 0) + /* Decide which path matchers to keep */ + for _, v := range urlMap.PathMatchers { + /* If it's in the old state, we have ownership over the host rule */ + _, okOld := _oldPathMatchersMap[v.Name] + vNew, okNew := _newPathMatchersMap[v.Name] + + /* Drop deleted entries */ + if okOld && !okNew { + continue + } + + /* Don't change entries that don't belong to us */ + if !okNew { + newPathMatchers = append(newPathMatchers, v) + } else { + newPathMatchers = append(newPathMatchers, createPathMatcher(vNew)) + + delete(_newPathMatchersMap, v.Name) + } + } + + /* Record brand new host rules */ + for _, v := range _newPathMatchersMap { + newPathMatchers = append(newPathMatchers, createPathMatcher(v)) + } + + urlMap.PathMatchers = newPathMatchers + } + + if d.HasChange("test") { + _oldTests, _newTests := d.GetChange("test") + _oldTestsMap := make(map[string]interface{}) + _newTestsMap := make(map[string]interface{}) + + for _, v := range _oldTests.([]interface{}) { + _test := 
v.(map[string]interface{}) + ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string)) + _oldTestsMap[ident] = v + } + + for _, v := range _newTests.([]interface{}) { + _test := v.(map[string]interface{}) + ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string)) + _newTestsMap[ident] = v + } + + newTests := make([]*compute.UrlMapTest, 0) + /* Decide which path matchers to keep */ + for _, v := range urlMap.Tests { + ident := fmt.Sprintf("%s/%s", v.Host, v.Path) + /* If it's in the old state, we have ownership over the host rule */ + _, okOld := _oldTestsMap[ident] + vNew, okNew := _newTestsMap[ident] + + /* Drop deleted entries */ + if okOld && !okNew { + continue + } + + /* Don't change entries that don't belong to us */ + if !okNew { + newTests = append(newTests, v) + } else { + newTests = append(newTests, createUrlMapTest(vNew)) + + delete(_newTestsMap, ident) + } + } + + /* Record brand new host rules */ + for _, v := range _newTestsMap { + newTests = append(newTests, createUrlMapTest(v)) + } + + urlMap.Tests = newTests + } + op, err := config.clientCompute.UrlMaps.Update(project, urlMap.Name, urlMap).Do() + + if err != nil { + return fmt.Errorf("Error, failed to update Url Map %s: %s", name, err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Update Url Map") + + if err != nil { + return fmt.Errorf("Error, failed waitng to update Url Map %s: %s", name, err) + } + + return resourceComputeUrlMapRead(d, meta) +} + +func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + op, err := config.clientCompute.UrlMaps.Delete(project, name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err) + } + + err = computeOperationWait(config.clientCompute, op, project, "Delete Url Map") + + if err != nil 
{ + return fmt.Errorf("Error, failed waitng to delete Url Map %s: %s", name, err) + } + + return nil +} + +func resourceComputeUrlMapImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_container_cluster.go b/provider/terraform/resources/resource_container_cluster.go new file mode 100644 index 000000000000..e686f9e656d0 --- /dev/null +++ b/provider/terraform/resources/resource_container_cluster.go @@ -0,0 +1,1766 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + containerBeta "google.golang.org/api/container/v1beta1" +) + +var ( + instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/v1/projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", ProjectRegex)) + + networkConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_blocks": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + MaxItems: 20, + Elem: cidrBlockConfig, + }, + }, + } + cidrBlockConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.CIDRNetwork(0, 32), + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + }, + }, + } + + ipAllocationSubnetFields = []string{"ip_allocation_policy.0.create_subnetwork", "ip_allocation_policy.0.subnetwork_name"} + ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block"} + ipAllocationRangeFields = []string{"ip_allocation_policy.0.cluster_secondary_range_name", 
"ip_allocation_policy.0.services_secondary_range_name"} +) + +func resourceContainerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerClusterCreate, + Read: resourceContainerClusterRead, + Update: resourceContainerClusterUpdate, + Delete: resourceContainerClusterDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + SchemaVersion: 1, + MigrateState: resourceContainerClusterMigrateState, + + Importer: &schema.ResourceImporter{ + State: resourceContainerClusterStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 40 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 40 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "region": { + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. 
See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"zone"}, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"region"}, + }, + + "additional_zones": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "addons_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_load_balancing": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "horizontal_pod_autoscaling": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "kubernetes_dashboard": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "network_policy_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + + "cluster_ipv4_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: orEmpty(validateRFC1918Network(8, 32)), + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "enable_binary_authorization": { + Deprecated: "This field is in beta and will be 
removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "enable_kubernetes_alpha": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "enable_tpu": { + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "enable_legacy_abac": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "initial_node_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "logging_service": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"logging.googleapis.com", "logging.googleapis.com/kubernetes", "none"}, false), + }, + + "maintenance_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "daily_maintenance_window": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateRFC3339Time, + DiffSuppressFunc: rfc3339TimeDiffSuppress, + }, + "duration": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + + "master_auth": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + + "username": { + Type: schema.TypeString, + Required: true, + }, + + "client_certificate_config": { + Type: schema.TypeList, 
+ MaxItems: 1, + Optional: true, + DiffSuppressFunc: masterAuthClientCertCfgSuppress, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issue_client_certificate": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + DiffSuppressFunc: masterAuthClientCertCfgSuppress, + }, + }, + }, + }, + + "client_certificate": { + Type: schema.TypeString, + Computed: true, + }, + + "client_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + + "cluster_ca_certificate": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "master_authorized_networks_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: networkConfig, + }, + + "min_master_version": { + Type: schema.TypeString, + Optional: true, + }, + + "monitoring_service": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"monitoring.googleapis.com", "monitoring.googleapis.com/kubernetes", "none"}, false), + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "network_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "provider": { + Type: schema.TypeString, + Default: "PROVIDER_UNSPECIFIED", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "CALICO"}, false), + DiffSuppressFunc: emptyOrDefaultStringSuppress("PROVIDER_UNSPECIFIED"), + }, + }, + }, + }, + + "node_config": schemaNodeConfig, + + "node_pool": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools + Elem: &schema.Resource{ + Schema: schemaNodePool, + }, + }, + + "node_version": { + 
Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "pod_security_policy_config": { + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + DiffSuppressFunc: podSecurityPolicyCfgSuppress, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + }, + + "instance_group_urls": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "master_version": { + Type: schema.TypeString, + Computed: true, + }, + + "ip_allocation_policy": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // GKE creates subnetwork automatically + "create_subnetwork": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + ConflictsWith: append(ipAllocationCidrBlockFields, ipAllocationRangeFields...), + }, + "subnetwork_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: append(ipAllocationCidrBlockFields, ipAllocationRangeFields...), + }, + + // GKE creates/deletes secondary ranges in VPC + "cluster_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: append(ipAllocationSubnetFields, ipAllocationRangeFields...), + DiffSuppressFunc: cidrOrSizeDiffSuppress, + }, + "services_ipv4_cidr_block": { + Type: 
schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: append(ipAllocationSubnetFields, ipAllocationRangeFields...), + DiffSuppressFunc: cidrOrSizeDiffSuppress, + }, + + // User manages secondary ranges manually + "cluster_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: append(ipAllocationSubnetFields, ipAllocationCidrBlockFields...), + }, + "services_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: append(ipAllocationSubnetFields, ipAllocationCidrBlockFields...), + }, + }, + }, + }, + + "remove_default_node_pool": { + Type: schema.TypeBool, + Optional: true, + }, + + "private_cluster": { + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "master_ipv4_cidr_block": { + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.CIDRNetwork(28, 28), + }, + + "resource_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func cidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If the user specified a size and the API returned a full cidr block, suppress. 
+ return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) +} + +func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + location, err := getLocation(d, config) + if err != nil { + return err + } + + clusterName := d.Get("name").(string) + + cluster := &containerBeta.Cluster{ + Name: clusterName, + InitialNodeCount: int64(d.Get("initial_node_count").(int)), + MaintenancePolicy: expandMaintenancePolicy(d.Get("maintenance_policy")), + MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config")), + InitialClusterVersion: d.Get("min_master_version").(string), + ClusterIpv4Cidr: d.Get("cluster_ipv4_cidr").(string), + Description: d.Get("description").(string), + LegacyAbac: &containerBeta.LegacyAbac{ + Enabled: d.Get("enable_legacy_abac").(bool), + ForceSendFields: []string{"Enabled"}, + }, + LoggingService: d.Get("logging_service").(string), + MonitoringService: d.Get("monitoring_service").(string), + NetworkPolicy: expandNetworkPolicy(d.Get("network_policy")), + AddonsConfig: expandClusterAddonsConfig(d.Get("addons_config")), + EnableKubernetesAlpha: d.Get("enable_kubernetes_alpha").(bool), + EnableTpu: d.Get("enable_tpu").(bool), + IpAllocationPolicy: expandIPAllocationPolicy(d.Get("ip_allocation_policy")), + PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), + MasterIpv4CidrBlock: d.Get("master_ipv4_cidr_block").(string), + BinaryAuthorization: &containerBeta.BinaryAuthorization{ + Enabled: d.Get("enable_binary_authorization").(bool), + ForceSendFields: []string{"Enabled"}, + }, + MasterAuth: expandMasterAuth(d.Get("master_auth")), + ResourceLabels: expandStringMap(d, "resource_labels"), + } + + // Only allow setting node_version on create if it's set to the equivalent master version, + // since `InitialClusterVersion` only 
accepts valid master-style versions. + if v, ok := d.GetOk("node_version"); ok { + // ignore -gke.X suffix for now. if it becomes a problem later, we can fix it. + mv := strings.Split(cluster.InitialClusterVersion, "-")[0] + nv := strings.Split(v.(string), "-")[0] + if mv != nv { + return fmt.Errorf("node_version and min_master_version must be set to equivalent values on create") + } + } + + if v, ok := d.GetOk("additional_zones"); ok { + locationsSet := v.(*schema.Set) + if locationsSet.Contains(location) { + return fmt.Errorf("additional_zones should not contain the original 'zone'") + } + if isZone(location) { + // GKE requires a full list of locations (including the original zone), + // but our schema only asks for additional zones, so append the original. + locationsSet.Add(location) + } + cluster.Locations = convertStringSet(locationsSet) + } + + if v, ok := d.GetOk("network"); ok { + network, err := ParseNetworkFieldValue(v.(string), d, config) + if err != nil { + return err + } + cluster.Network = network.RelativeLink() + } + + if v, ok := d.GetOk("subnetwork"); ok { + subnetwork, err := ParseSubnetworkFieldValue(v.(string), d, config) + if err != nil { + return err + } + cluster.Subnetwork = subnetwork.RelativeLink() + } + + nodePoolsCount := d.Get("node_pool.#").(int) + if nodePoolsCount > 0 { + nodePools := make([]*containerBeta.NodePool, 0, nodePoolsCount) + for i := 0; i < nodePoolsCount; i++ { + prefix := fmt.Sprintf("node_pool.%d.", i) + nodePool, err := expandNodePool(d, prefix) + if err != nil { + return err + } + nodePools = append(nodePools, nodePool) + } + cluster.NodePools = nodePools + } else { + // Node Configs have default values that are set in the expand function, + // but can only be set if node pools are unspecified. 
+ cluster.NodeConfig = expandNodeConfig([]interface{}{}) + } + + if v, ok := d.GetOk("node_config"); ok { + cluster.NodeConfig = expandNodeConfig(v) + } + + if v, ok := d.GetOk("private_cluster"); ok { + if cluster.PrivateCluster = v.(bool); cluster.PrivateCluster { + if cluster.MasterIpv4CidrBlock == "" { + return fmt.Errorf("master_ipv4_cidr_block is mandatory when private_cluster=true") + } + if cluster.IpAllocationPolicy == nil { + return fmt.Errorf("ip_allocation_policy is mandatory when private_cluster=true") + } + } + } + + req := &containerBeta.CreateClusterRequest{ + Cluster: cluster, + } + + mutexKV.Lock(containerClusterMutexKey(project, location, clusterName)) + defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName)) + + parent := fmt.Sprintf("projects/%s/locations/%s", project, location) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.Create(parent, req).Do() + if err != nil { + return err + } + + d.SetId(clusterName) + + // Wait until it's created + timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) + waitErr := containerSharedOperationWait(config, op, project, location, "creating GKE cluster", timeoutInMinutes, 3) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] GKE cluster %s has been created", clusterName) + + if d.Get("remove_default_node_pool").(bool) { + parent := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") + op, err = config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(parent).Do() + if err != nil { + return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) + } + err = containerSharedOperationWait(config, op, project, location, "removing default node pool", timeoutInMinutes, 3) + if err != nil { + return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) + } + } + + return resourceContainerClusterRead(d, meta) 
+} + +func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + location, err := getLocation(d, config) + if err != nil { + return err + } + + cluster := &containerBeta.Cluster{} + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + name := containerClusterFullName(project, location, d.Get("name").(string)) + cluster, err = config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do() + if err != nil { + return resource.NonRetryableError(err) + } + if cluster.Status != "RUNNING" { + return resource.RetryableError(fmt.Errorf("Cluster %q has status %q with message %q", d.Get("name"), cluster.Status, cluster.StatusMessage)) + } + return nil + }) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) + } + + d.Set("name", cluster.Name) + if err := d.Set("network_policy", flattenNetworkPolicy(cluster.NetworkPolicy)); err != nil { + return err + } + d.Set("zone", cluster.Zone) + + locations := schema.NewSet(schema.HashString, convertStringArrToInterface(cluster.Locations)) + locations.Remove(cluster.Zone) // Remove the original zone since we only store additional zones + d.Set("additional_zones", locations) + + d.Set("endpoint", cluster.Endpoint) + if err := d.Set("maintenance_policy", flattenMaintenancePolicy(cluster.MaintenancePolicy)); err != nil { + return err + } + if err := d.Set("master_auth", flattenMasterAuth(cluster.MasterAuth)); err != nil { + return err + } + if err := d.Set("master_authorized_networks_config", flattenMasterAuthorizedNetworksConfig(cluster.MasterAuthorizedNetworksConfig)); err != nil { + return err + } + d.Set("initial_node_count", cluster.InitialNodeCount) + d.Set("master_version", cluster.CurrentMasterVersion) + d.Set("node_version", cluster.CurrentNodeVersion) + d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr) + 
d.Set("description", cluster.Description) + d.Set("enable_kubernetes_alpha", cluster.EnableKubernetesAlpha) + d.Set("enable_tpu", cluster.EnableTpu) + d.Set("enable_legacy_abac", cluster.LegacyAbac.Enabled) + d.Set("logging_service", cluster.LoggingService) + d.Set("monitoring_service", cluster.MonitoringService) + d.Set("network", cluster.NetworkConfig.Network) + d.Set("subnetwork", cluster.NetworkConfig.Subnetwork) + d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled) + if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig)); err != nil { + return err + } + d.Set("project", project) + if err := d.Set("addons_config", flattenClusterAddonsConfig(cluster.AddonsConfig)); err != nil { + return err + } + nps, err := flattenClusterNodePools(d, config, cluster.NodePools) + if err != nil { + return err + } + if err := d.Set("node_pool", nps); err != nil { + return err + } + + if err := d.Set("ip_allocation_policy", flattenIPAllocationPolicy(cluster.IpAllocationPolicy)); err != nil { + return err + } + + igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls) + if err != nil { + return err + } + if err := d.Set("instance_group_urls", igUrls); err != nil { + return err + } + + if err := d.Set("pod_security_policy_config", flattenPodSecurityPolicyConfig(cluster.PodSecurityPolicyConfig)); err != nil { + return err + } + + d.Set("private_cluster", cluster.PrivateCluster) + d.Set("master_ipv4_cidr_block", cluster.MasterIpv4CidrBlock) + d.Set("resource_labels", cluster.ResourceLabels) + + return nil +} + +func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + location, err := getLocation(d, config) + if err != nil { + return err + } + + clusterName := d.Get("name").(string) + timeoutInMinutes := 
int(d.Timeout(schema.TimeoutUpdate).Minutes()) + + d.Partial(true) + + lockKey := containerClusterMutexKey(project, location, clusterName) + + updateFunc := func(req *containerBeta.UpdateClusterRequest, updateDescription string) func() error { + return func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do() + if err != nil { + return err + } + // Wait until it's updated + return containerSharedOperationWait(config, op, project, location, updateDescription, timeoutInMinutes, 2) + } + } + + // The ClusterUpdate object that we use for most of these updates only allows updating one field at a time, + // so we have to make separate calls for each field that we want to update. The order here is fairly arbitrary- + // if the order of updating fields does matter, it is called out explicitly. + if d.HasChange("master_authorized_networks_config") { + c := d.Get("master_authorized_networks_config") + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredMasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(c), + }, + } + + updateF := updateFunc(req, "updating GKE cluster master authorized networks") + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s master authorized networks config has been updated", d.Id()) + + d.SetPartial("master_authorized_networks_config") + } + + // The master must be updated before the nodes + if d.HasChange("min_master_version") { + desiredMasterVersion := d.Get("min_master_version").(string) + currentMasterVersion := d.Get("master_version").(string) + des, err := version.NewVersion(desiredMasterVersion) + if err != nil { + return err + } + cur, err := version.NewVersion(currentMasterVersion) + if err != nil { + return err + } + + // Only upgrade the master if the current version is lower than the desired version + if 
cur.LessThan(des) { + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredMasterVersion: desiredMasterVersion, + }, + } + + updateF := updateFunc(req, "updating GKE master version") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s: master has been updated to %s", d.Id(), desiredMasterVersion) + } + d.SetPartial("min_master_version") + } + + if d.HasChange("node_version") { + desiredNodeVersion := d.Get("node_version").(string) + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredNodeVersion: desiredNodeVersion, + }, + } + + updateF := updateFunc(req, "updating GKE node version") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s: nodes have been updated to %s", d.Id(), + desiredNodeVersion) + + d.SetPartial("node_version") + } + + if d.HasChange("addons_config") { + if ac, ok := d.GetOk("addons_config"); ok { + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredAddonsConfig: expandClusterAddonsConfig(ac), + }, + } + + updateF := updateFunc(req, "updating GKE cluster addons") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s addons have been updated", d.Id()) + + d.SetPartial("addons_config") + } + } + + if d.HasChange("enable_binary_authorization") { + enabled := d.Get("enable_binary_authorization").(bool) + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredBinaryAuthorization: &containerBeta.BinaryAuthorization{ + Enabled: enabled, + ForceSendFields: []string{"Enabled"}, + }, + }, + } + + updateF := updateFunc(req, "updating GKE binary authorization") + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), enabled) + + d.SetPartial("enable_binary_authorization") + } + + if d.HasChange("maintenance_policy") { + var req *containerBeta.SetMaintenancePolicyRequest + if mp, ok := d.GetOk("maintenance_policy"); ok { + req = &containerBeta.SetMaintenancePolicyRequest{ + MaintenancePolicy: expandMaintenancePolicy(mp), + } + } else { + req = &containerBeta.SetMaintenancePolicyRequest{ + NullFields: []string{"MaintenancePolicy"}, + } + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetMaintenancePolicy(name, req).Do() + + if err != nil { + return err + } + + // Wait until it's updated + return containerSharedOperationWait(config, op, project, location, "updating GKE cluster maintenance policy", timeoutInMinutes, 2) + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s maintenance policy has been updated", d.Id()) + + d.SetPartial("maintenance_policy") + } + + if d.HasChange("additional_zones") { + azSetOldI, azSetNewI := d.GetChange("additional_zones") + azSetNew := azSetNewI.(*schema.Set) + azSetOld := azSetOldI.(*schema.Set) + if azSetNew.Contains(location) { + return fmt.Errorf("additional_zones should not contain the original 'zone'") + } + // Since we can't add & remove zones in the same request, first add all the + // zones, then remove the ones we aren't using anymore. + azSet := azSetOld.Union(azSetNew) + + if isZone(location) { + azSet.Add(location) + } + + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredLocations: convertStringSet(azSet), + }, + } + + updateF := updateFunc(req, "updating GKE cluster locations") + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + if isZone(location) { + azSetNew.Add(location) + } + if !azSet.Equal(azSetNew) { + req = &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredLocations: convertStringSet(azSetNew), + }, + } + + updateF := updateFunc(req, "updating GKE cluster locations") + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + } + + log.Printf("[INFO] GKE cluster %s locations have been updated to %v", d.Id(), azSet.List()) + + d.SetPartial("additional_zones") + } + + if d.HasChange("enable_legacy_abac") { + enabled := d.Get("enable_legacy_abac").(bool) + req := &containerBeta.SetLegacyAbacRequest{ + Enabled: enabled, + ForceSendFields: []string{"Enabled"}, + } + + updateF := func() error { + log.Println("[DEBUG] updating enable_legacy_abac") + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetLegacyAbac(name, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + err = containerSharedOperationWait(config, op, project, location, "updating GKE legacy ABAC", timeoutInMinutes, 2) + log.Println("[DEBUG] done updating enable_legacy_abac") + return err + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s legacy ABAC has been updated to %v", d.Id(), enabled) + + d.SetPartial("enable_legacy_abac") + } + + if d.HasChange("monitoring_service") { + desiredMonitoringService := d.Get("monitoring_service").(string) + + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredMonitoringService: desiredMonitoringService, + }, + } + + updateF := updateFunc(req, "updating GKE cluster monitoring service") + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Monitoring service for GKE cluster %s has been updated to %s", d.Id(), + desiredMonitoringService) + + d.SetPartial("monitoring_service") + } + + if d.HasChange("network_policy") { + np := d.Get("network_policy") + req := &containerBeta.SetNetworkPolicyRequest{ + NetworkPolicy: expandNetworkPolicy(np), + } + + updateF := func() error { + log.Println("[DEBUG] updating network_policy") + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetNetworkPolicy(name, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + err = containerSharedOperationWait(config, op, project, location, "updating GKE cluster network policy", timeoutInMinutes, 2) + log.Println("[DEBUG] done updating network_policy") + return err + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Network policy for GKE cluster %s has been updated", d.Id()) + + d.SetPartial("network_policy") + + } + + if n, ok := d.GetOk("node_pool.#"); ok { + for i := 0; i < n.(int); i++ { + nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) + if err != nil { + return err + } + + if err := nodePoolUpdate(d, meta, nodePoolInfo, fmt.Sprintf("node_pool.%d.", i), timeoutInMinutes); err != nil { + return err + } + } + d.SetPartial("node_pool") + } + + if d.HasChange("logging_service") { + logging := d.Get("logging_service").(string) + + req := &containerBeta.SetLoggingServiceRequest{ + LoggingService: logging, + } + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetLogging(name, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + return containerSharedOperationWait(config, op, project, 
location, "updating GKE logging service", timeoutInMinutes, 2) + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: logging service has been updated to %s", d.Id(), + logging) + d.SetPartial("logging_service") + } + + if d.HasChange("node_config") { + if d.HasChange("node_config.0.image_type") { + it := d.Get("node_config.0.image_type").(string) + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredImageType: it, + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + return containerSharedOperationWait(config, op, project, location, "updating GKE image type", timeoutInMinutes, 2) + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) + } + d.SetPartial("node_config") + } + + if d.HasChange("master_auth") { + var req *containerBeta.SetMasterAuthRequest + if ma, ok := d.GetOk("master_auth"); ok { + req = &containerBeta.SetMasterAuthRequest{ + Action: "SET_USERNAME", + Update: expandMasterAuth(ma), + } + } else { + req = &containerBeta.SetMasterAuthRequest{ + Action: "SET_USERNAME", + Update: &containerBeta.MasterAuth{ + Username: "admin", + }, + } + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetMasterAuth(name, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + return containerSharedOperationWait(config, op, project, location, "updating master auth", timeoutInMinutes, 2) + } + + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: master auth has been updated", d.Id()) + d.SetPartial("master_auth") + } + + if d.HasChange("pod_security_policy_config") { + c := d.Get("pod_security_policy_config") + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredPodSecurityPolicyConfig: expandPodSecurityPolicyConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do() + if err != nil { + return err + } + // Wait until it's updated + return containerSharedOperationWait(config, op, project, location, "updating GKE cluster pod security policy config", timeoutInMinutes, 2) + } + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s pod security policy config has been updated", d.Id()) + + d.SetPartial("pod_security_policy_config") + } + + if d.HasChange("resource_labels") { + resourceLabels := d.Get("resource_labels").(map[string]interface{}) + req := &containerBeta.SetLabelsRequest{ + ResourceLabels: convertStringMap(resourceLabels), + } + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + op, err := config.clientContainerBeta.Projects.Locations.Clusters.SetResourceLabels(name, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + return containerSharedOperationWait(config, op, project, location, "updating GKE resource labels", timeoutInMinutes, 2) + } + + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + d.SetPartial("resource_labels") + } + + if d.HasChange("remove_default_node_pool") && d.Get("remove_default_node_pool").(bool) { + name := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") + op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Delete(name).Do() + if err != nil { + return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) + } + err = containerSharedOperationWait(config, op, project, location, "removing default node pool", timeoutInMinutes, 3) + if err != nil { + return errwrap.Wrapf("Error deleting default node pool: {{err}}", err) + } + } + + d.Partial(false) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + var location string + locations := []string{} + if regionName, isRegionalCluster := d.GetOk("region"); !isRegionalCluster { + location, err = getZone(d, config) + if err != nil { + return err + } + locations = append(locations, location) + } else { + location = regionName.(string) + } + + clusterName := d.Get("name").(string) + timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) + + log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) + mutexKV.Lock(containerClusterMutexKey(project, location, clusterName)) + defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName)) + + var op interface{} + var count = 0 + err = resource.Retry(30*time.Second, func() *resource.RetryError { + count++ + + name := containerClusterFullName(project, location, clusterName) + op, err = config.clientContainerBeta.Projects.Locations.Clusters.Delete(name).Do() + + if err != nil { + log.Printf("[WARNING] Cluster is still not ready to delete, retrying %s", 
clusterName) + return resource.RetryableError(err) + } + + if count == 15 { + return resource.NonRetryableError(fmt.Errorf("Error retrying to delete cluster %s", clusterName)) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error deleting Cluster: %s", err) + } + + // Wait until it's deleted + waitErr := containerSharedOperationWait(config, op, project, location, "deleting GKE cluster", timeoutInMinutes, 3) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +// container engine's API currently mistakenly returns the instance group manager's +// URL instead of the instance group's URL in its responses. This shim detects that +// error, and corrects it, by fetching the instance group manager URL and retrieving +// the instance group manager, then using that to look up the instance group URL, which +// is then substituted. +// +// This should be removed when the API response is fixed. 
+func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]string, error) { + instanceGroupURLs := make([]string, 0, len(igmUrls)) + for _, u := range igmUrls { + if !instanceGroupManagerURL.MatchString(u) { + instanceGroupURLs = append(instanceGroupURLs, u) + continue + } + matches := instanceGroupManagerURL.FindStringSubmatch(u) + instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() + if err != nil { + return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err) + } + instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup) + } + return instanceGroupURLs, nil +} + +func expandClusterAddonsConfig(configured interface{}) *containerBeta.AddonsConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + ac := &containerBeta.AddonsConfig{} + + if v, ok := config["http_load_balancing"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + ac.HttpLoadBalancing = &containerBeta.HttpLoadBalancing{ + Disabled: addon["disabled"].(bool), + ForceSendFields: []string{"Disabled"}, + } + } + + if v, ok := config["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + ac.HorizontalPodAutoscaling = &containerBeta.HorizontalPodAutoscaling{ + Disabled: addon["disabled"].(bool), + ForceSendFields: []string{"Disabled"}, + } + } + + if v, ok := config["kubernetes_dashboard"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + ac.KubernetesDashboard = &containerBeta.KubernetesDashboard{ + Disabled: addon["disabled"].(bool), + ForceSendFields: []string{"Disabled"}, + } + } + + if v, ok := config["network_policy_config"]; ok && len(v.([]interface{})) > 0 { + addon := 
v.([]interface{})[0].(map[string]interface{}) + ac.NetworkPolicyConfig = &containerBeta.NetworkPolicyConfig{ + Disabled: addon["disabled"].(bool), + ForceSendFields: []string{"Disabled"}, + } + } + + return ac +} + +func expandIPAllocationPolicy(configured interface{}) *containerBeta.IPAllocationPolicy { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + + return &containerBeta.IPAllocationPolicy{ + UseIpAliases: true, + + CreateSubnetwork: config["create_subnetwork"].(bool), + SubnetworkName: config["subnetwork_name"].(string), + + ClusterIpv4CidrBlock: config["cluster_ipv4_cidr_block"].(string), + ServicesIpv4CidrBlock: config["services_ipv4_cidr_block"].(string), + + ClusterSecondaryRangeName: config["cluster_secondary_range_name"].(string), + ServicesSecondaryRangeName: config["services_secondary_range_name"].(string), + } +} + +func expandMaintenancePolicy(configured interface{}) *containerBeta.MaintenancePolicy { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + maintenancePolicy := l[0].(map[string]interface{}) + dailyMaintenanceWindow := maintenancePolicy["daily_maintenance_window"].([]interface{})[0].(map[string]interface{}) + startTime := dailyMaintenanceWindow["start_time"].(string) + return &containerBeta.MaintenancePolicy{ + Window: &containerBeta.MaintenanceWindow{ + DailyMaintenanceWindow: &containerBeta.DailyMaintenanceWindow{ + StartTime: startTime, + }, + }, + } +} + +func expandMasterAuth(configured interface{}) *containerBeta.MasterAuth { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + masterAuth := l[0].(map[string]interface{}) + result := &containerBeta.MasterAuth{ + Username: masterAuth["username"].(string), + Password: masterAuth["password"].(string), + } + if _, ok := masterAuth["client_certificate_config"]; ok { + if len(masterAuth["client_certificate_config"].([]interface{})) > 0 { 
+ clientCertificateConfig := masterAuth["client_certificate_config"].([]interface{})[0].(map[string]interface{}) + if _, ok := clientCertificateConfig["issue_client_certificate"]; ok { + result.ClientCertificateConfig = &containerBeta.ClientCertificateConfig{ + IssueClientCertificate: clientCertificateConfig["issue_client_certificate"].(bool), + } + } + } + } + return result +} + +func expandMasterAuthorizedNetworksConfig(configured interface{}) *containerBeta.MasterAuthorizedNetworksConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + result := &containerBeta.MasterAuthorizedNetworksConfig{ + Enabled: true, + } + if config, ok := l[0].(map[string]interface{}); ok { + if _, ok := config["cidr_blocks"]; ok { + cidrBlocks := config["cidr_blocks"].(*schema.Set).List() + result.CidrBlocks = make([]*containerBeta.CidrBlock, 0) + for _, v := range cidrBlocks { + cidrBlock := v.(map[string]interface{}) + result.CidrBlocks = append(result.CidrBlocks, &containerBeta.CidrBlock{ + CidrBlock: cidrBlock["cidr_block"].(string), + DisplayName: cidrBlock["display_name"].(string), + }) + } + } + } + return result +} + +func expandNetworkPolicy(configured interface{}) *containerBeta.NetworkPolicy { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + result := &containerBeta.NetworkPolicy{} + config := l[0].(map[string]interface{}) + if enabled, ok := config["enabled"]; ok && enabled.(bool) { + result.Enabled = true + if provider, ok := config["provider"]; ok { + result.Provider = provider.(string) + } + } + return result +} + +func expandPodSecurityPolicyConfig(configured interface{}) *containerBeta.PodSecurityPolicyConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &containerBeta.PodSecurityPolicyConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} + +func flattenNetworkPolicy(c 
*containerBeta.NetworkPolicy) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + "provider": c.Provider, + }) + } else { + // Explicitly set the network policy to the default. + result = append(result, map[string]interface{}{ + "enabled": false, + "provider": "PROVIDER_UNSPECIFIED", + }) + } + return result +} + +func flattenClusterAddonsConfig(c *containerBeta.AddonsConfig) []map[string]interface{} { + result := make(map[string]interface{}) + if c == nil { + return nil + } + if c.HorizontalPodAutoscaling != nil { + result["horizontal_pod_autoscaling"] = []map[string]interface{}{ + { + "disabled": c.HorizontalPodAutoscaling.Disabled, + }, + } + } + if c.HttpLoadBalancing != nil { + result["http_load_balancing"] = []map[string]interface{}{ + { + "disabled": c.HttpLoadBalancing.Disabled, + }, + } + } + if c.KubernetesDashboard != nil { + result["kubernetes_dashboard"] = []map[string]interface{}{ + { + "disabled": c.KubernetesDashboard.Disabled, + }, + } + } + if c.NetworkPolicyConfig != nil { + result["network_policy_config"] = []map[string]interface{}{ + { + "disabled": c.NetworkPolicyConfig.Disabled, + }, + } + } + + return []map[string]interface{}{result} +} + +func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*containerBeta.NodePool) ([]map[string]interface{}, error) { + nodePools := make([]map[string]interface{}, 0, len(c)) + + for i, np := range c { + nodePool, err := flattenNodePool(d, config, np, fmt.Sprintf("node_pool.%d.", i)) + if err != nil { + return nil, err + } + nodePools = append(nodePools, nodePool) + } + + return nodePools, nil +} + +func flattenIPAllocationPolicy(c *containerBeta.IPAllocationPolicy) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "create_subnetwork": c.CreateSubnetwork, + "subnetwork_name": c.SubnetworkName, + + "cluster_ipv4_cidr_block": 
c.ClusterIpv4CidrBlock, + "services_ipv4_cidr_block": c.ServicesIpv4CidrBlock, + + "cluster_secondary_range_name": c.ClusterSecondaryRangeName, + "services_secondary_range_name": c.ServicesSecondaryRangeName, + }, + } +} + +func flattenMaintenancePolicy(mp *containerBeta.MaintenancePolicy) []map[string]interface{} { + if mp == nil { + return nil + } + return []map[string]interface{}{ + { + "daily_maintenance_window": []map[string]interface{}{ + { + "start_time": mp.Window.DailyMaintenanceWindow.StartTime, + "duration": mp.Window.DailyMaintenanceWindow.Duration, + }, + }, + }, + } +} + +func flattenMasterAuth(ma *containerBeta.MasterAuth) []map[string]interface{} { + if ma == nil { + return nil + } + masterAuth := []map[string]interface{}{ + { + "username": ma.Username, + "password": ma.Password, + "client_certificate": ma.ClientCertificate, + "client_key": ma.ClientKey, + "cluster_ca_certificate": ma.ClusterCaCertificate, + }, + } + if len(ma.ClientCertificate) == 0 { + masterAuth[0]["client_certificate_config"] = []map[string]interface{}{ + {"issue_client_certificate": false}, + } + } + return masterAuth +} + +func flattenMasterAuthorizedNetworksConfig(c *containerBeta.MasterAuthorizedNetworksConfig) []map[string]interface{} { + if c == nil { + return nil + } + result := make(map[string]interface{}) + if c.Enabled { + cidrBlocks := make([]interface{}, 0, len(c.CidrBlocks)) + for _, v := range c.CidrBlocks { + cidrBlocks = append(cidrBlocks, map[string]interface{}{ + "cidr_block": v.CidrBlock, + "display_name": v.DisplayName, + }) + } + result["cidr_blocks"] = schema.NewSet(schema.HashResource(cidrBlockConfig), cidrBlocks) + } + return []map[string]interface{}{result} +} + +func flattenPodSecurityPolicyConfig(c *containerBeta.PodSecurityPolicyConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +func resourceContainerClusterStateImporter(d *schema.ResourceData, meta 
interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + + switch len(parts) { + case 2: + if loc := parts[0]; isZone(loc) { + d.Set("zone", loc) + } else { + d.Set("region", loc) + } + d.Set("name", parts[1]) + case 3: + d.Set("project", parts[0]) + if loc := parts[1]; isZone(loc) { + d.Set("zone", loc) + } else { + d.Set("region", loc) + } + d.Set("name", parts[2]) + default: + return nil, fmt.Errorf("Invalid container cluster specifier. Expecting {zone}/{name} or {project}/{zone}/{name}") + } + + d.SetId(parts[len(parts)-1]) + return []*schema.ResourceData{d}, nil +} + +func containerClusterMutexKey(project, location, clusterName string) string { + return fmt.Sprintf("google-container-cluster/%s/%s/%s", project, location, clusterName) +} + +func containerClusterFullName(project, location, cluster string) string { + return fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, cluster) +} + +func extractNodePoolInformationFromCluster(d *schema.ResourceData, config *Config, clusterName string) (*NodePoolInformation, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + location, err := getLocation(d, config) + if err != nil { + return nil, err + } + + return &NodePoolInformation{ + project: project, + location: location, + cluster: d.Get("name").(string), + }, nil +} + +// We want to suppress diffs for empty or default client certificate configs, i.e: +// [{ "issue_client_certificate": true}] --> [] +// [] -> [{ "issue_client_certificate": true}] +func masterAuthClientCertCfgSuppress(k, old, new string, r *schema.ResourceData) bool { + var clientConfig map[string]interface{} + if v, ok := r.GetOk("master_auth"); ok { + masterAuths := v.([]interface{}) + masterAuth := masterAuths[0].(map[string]interface{}) + cfgs := masterAuth["client_certificate_config"].([]interface{}) + if len(cfgs) > 0 { + clientConfig = cfgs[0].(map[string]interface{}) + } + } + + if strings.HasSuffix(k, 
"client_certificate_config.#") && old == "0" && new == "1" { + // nil --> { "issue_client_certificate": true } + if issueCert, ok := clientConfig["issue_client_certificate"]; ok { + return issueCert.(bool) + } + } + + return strings.HasSuffix(k, ".issue_client_certificate") && old == "" && new == "true" +} + +func podSecurityPolicyCfgSuppress(k, old, new string, r *schema.ResourceData) bool { + if k == "pod_security_policy_config.#" && old == "1" && new == "0" { + if v, ok := r.GetOk("pod_security_policy_config"); ok { + cfgList := v.([]interface{}) + if len(cfgList) > 0 { + d := cfgList[0].(map[string]interface{}) + // Suppress if old value was {enabled == false} + return !d["enabled"].(bool) + } + } + } + return false +} diff --git a/provider/terraform/resources/resource_container_cluster_migrate.go b/provider/terraform/resources/resource_container_cluster_migrate.go new file mode 100644 index 000000000000..720936b211a9 --- /dev/null +++ b/provider/terraform/resources/resource_container_cluster_migrate.go @@ -0,0 +1,70 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func resourceContainerClusterMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Container Cluster State v0; migrating to v1") + return migrateClusterStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateClusterStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + newZones := []string{} + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "additional_zones.") { + continue + } + + if k == "additional_zones.#" 
{ + continue + } + + // Key is now of the form additional_zones.%d + kParts := strings.Split(k, ".") + + // Sanity check: two parts should be there and should be a number + badFormat := false + if len(kParts) != 2 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf("migration error: found additional_zones key in unexpected format: %s", k) + } + + newZones = append(newZones, v) + delete(is.Attributes, k) + } + + for _, v := range newZones { + hash := schema.HashString(v) + newKey := fmt.Sprintf("additional_zones.%d", hash) + is.Attributes[newKey] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/provider/terraform/resources/resource_container_node_pool.go b/provider/terraform/resources/resource_container_node_pool.go new file mode 100644 index 000000000000..009fc788d822 --- /dev/null +++ b/provider/terraform/resources/resource_container_node_pool.go @@ -0,0 +1,750 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + containerBeta "google.golang.org/api/container/v1beta1" +) + +func resourceContainerNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerNodePoolCreate, + Read: resourceContainerNodePoolRead, + Update: resourceContainerNodePoolUpdate, + Delete: resourceContainerNodePoolDelete, + Exists: resourceContainerNodePoolExists, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + SchemaVersion: 1, + MigrateState: resourceContainerNodePoolMigrateState, + + Importer: &schema.ResourceImporter{ + State: resourceContainerNodePoolStateImporter, + }, + + Schema: 
mergeSchemas( + schemaNodePool, + map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "cluster": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }), + } +} + +var schemaNodePool = map[string]*schema.Schema{ + "autoscaling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + }, + + "max_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + }, + }, + }, + + "max_pods_per_node": &schema.Schema{ + Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. 
See https://terraform.io/docs/providers/google/provider_versions.html for more details.", + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "instance_group_urls": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "management": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "auto_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Deprecated: "Use the random provider instead. 
See migration instructions at " + + "https://github.com/terraform-providers/terraform-provider-google/issues/1054#issuecomment-377390209", + }, + + "node_config": schemaNodeConfig, + + "node_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + }, + + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, +} + +type NodePoolInformation struct { + project string + location string + cluster string +} + +func (nodePoolInformation *NodePoolInformation) fullyQualifiedName(nodeName string) string { + return fmt.Sprintf( + "projects/%s/locations/%s/clusters/%s/nodePools/%s", + nodePoolInformation.project, + nodePoolInformation.location, + nodePoolInformation.cluster, + nodeName, + ) +} + +func (nodePoolInformation *NodePoolInformation) parent() string { + return fmt.Sprintf( + "projects/%s/locations/%s/clusters/%s", + nodePoolInformation.project, + nodePoolInformation.location, + nodePoolInformation.cluster, + ) +} + +func (nodePoolInformation *NodePoolInformation) lockKey() string { + return containerClusterMutexKey(nodePoolInformation.project, + nodePoolInformation.location, nodePoolInformation.cluster) +} + +func extractNodePoolInformation(d *schema.ResourceData, config *Config) (*NodePoolInformation, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + location, err := getLocation(d, config) + if err != nil { + return nil, err + } + + return &NodePoolInformation{ + project: project, + location: location, + cluster: d.Get("cluster").(string), + }, nil +} + +func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + + nodePool, err := expandNodePool(d, "") + if err != nil { + return err + } + + mutexKV.Lock(nodePoolInfo.lockKey()) + defer mutexKV.Unlock(nodePoolInfo.lockKey()) + + req := 
&containerBeta.CreateNodePoolRequest{ + NodePool: nodePool, + } + + timeout := d.Timeout(schema.TimeoutCreate) + startTime := time.Now() + + var operation *containerBeta.Operation + err = resource.Retry(timeout, func() *resource.RetryError { + operation, err = config.clientContainerBeta. + Projects.Locations.Clusters.NodePools.Create(nodePoolInfo.parent(), req).Do() + + if err != nil { + if isFailedPreconditionError(err) { + // We get failed precondition errors if the cluster is updating + // while we try to add the node pool. + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("error creating NodePool: %s", err) + } + timeout -= time.Since(startTime) + + d.SetId(fmt.Sprintf("%s/%s/%s", nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name)) + + waitErr := containerBetaOperationWait(config, + operation, nodePoolInfo.project, + nodePoolInfo.location, "creating GKE NodePool", int(timeout.Minutes()), 3) + + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been created", nodePool.Name) + + return resourceContainerNodePoolRead(d, meta) +} + +func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + nodePoolInfo, err := extractNodePoolInformation(d, config) + + name := getNodePoolName(d.Id()) + + if err != nil { + return err + } + + var nodePool = &containerBeta.NodePool{} + err = resource.Retry(2*time.Minute, func() *resource.RetryError { + nodePool, err = config.clientContainerBeta. 
+ Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)).Do() + + if err != nil { + return resource.NonRetryableError(err) + } + if nodePool.Status != "RUNNING" { + return resource.RetryableError(fmt.Errorf("Nodepool %q has status %q with message %q", d.Get("name"), nodePool.Status, nodePool.StatusMessage)) + } + return nil + }) + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("NodePool %q from cluster %q", name, nodePoolInfo.cluster)) + } + + npMap, err := flattenNodePool(d, config, nodePool, "") + if err != nil { + return err + } + + for k, v := range npMap { + d.Set(k, v) + } + + if isZone(nodePoolInfo.location) { + d.Set("zone", nodePoolInfo.location) + } else { + d.Set("region", nodePoolInfo.location) + } + + d.Set("project", nodePoolInfo.project) + + return nil +} + +func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + + d.Partial(true) + if err := nodePoolUpdate(d, meta, nodePoolInfo, "", timeoutInMinutes); err != nil { + return err + } + d.Partial(false) + + return resourceContainerNodePoolRead(d, meta) +} + +func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + + name := getNodePoolName(d.Id()) + + timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) + + mutexKV.Lock(nodePoolInfo.lockKey()) + defer mutexKV.Unlock(nodePoolInfo.lockKey()) + + var op = &containerBeta.Operation{} + var count = 0 + err = resource.Retry(30*time.Second, func() *resource.RetryError { + count++ + op, err = config.clientContainerBeta.Projects.Locations. 
+ Clusters.NodePools.Delete(nodePoolInfo.fullyQualifiedName(name)).Do() + + if err != nil { + return resource.RetryableError(err) + } + + if count == 15 { + return resource.NonRetryableError(fmt.Errorf("Error retrying to delete node pool %s", name)) + } + return nil + }) + + if err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + // Wait until it's deleted + waitErr := containerBetaOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "deleting GKE NodePool", timeoutInMinutes, 2) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + config := meta.(*Config) + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return false, err + } + + name := getNodePoolName(d.Id()) + + _, err = config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)).Do() + if err != nil { + if err = handleNotFoundError(err, d, fmt.Sprintf("Container NodePool %s", name)); err == nil { + return false, nil + } + // There was some other error in reading the resource + return true, err + } + return true, nil +} + +func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + + switch len(parts) { + case 3: + location := parts[0] + if isZone(location) { + d.Set("zone", location) + } else { + d.Set("region", location) + } + + d.Set("cluster", parts[1]) + d.Set("name", parts[2]) + case 4: + d.Set("project", parts[0]) + + location := parts[1] + if isZone(location) { + d.Set("zone", location) + } else { + d.Set("region", location) + } + + d.Set("cluster", parts[2]) + d.Set("name", parts[3]) + + // override the inputted ID with the // format + d.SetId(strings.Join(parts[1:], "/")) + default: + return nil, 
fmt.Errorf("Invalid container cluster specifier. Expecting {zone}/{cluster}/{name} or {project}/{zone}/{cluster}/{name}") + } + + return []*schema.ResourceData{d}, nil +} + +func expandNodePool(d *schema.ResourceData, prefix string) (*containerBeta.NodePool, error) { + var name string + if v, ok := d.GetOk(prefix + "name"); ok { + if _, ok := d.GetOk(prefix + "name_prefix"); ok { + return nil, fmt.Errorf("Cannot specify both name and name_prefix for a node_pool") + } + name = v.(string) + } else if v, ok := d.GetOk(prefix + "name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + nodeCount := 0 + if initialNodeCount, ok := d.GetOk(prefix + "initial_node_count"); ok { + nodeCount = initialNodeCount.(int) + } + if nc, ok := d.GetOk(prefix + "node_count"); ok { + if nodeCount != 0 { + return nil, fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %s", name) + } + nodeCount = nc.(int) + } + + np := &containerBeta.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + Config: expandNodeConfig(d.Get(prefix + "node_config")), + Version: d.Get(prefix + "version").(string), + } + + if v, ok := d.GetOk(prefix + "autoscaling"); ok { + autoscaling := v.([]interface{})[0].(map[string]interface{}) + np.Autoscaling = &containerBeta.NodePoolAutoscaling{ + Enabled: true, + MinNodeCount: int64(autoscaling["min_node_count"].(int)), + MaxNodeCount: int64(autoscaling["max_node_count"].(int)), + ForceSendFields: []string{"MinNodeCount"}, + } + } + + if v, ok := d.GetOk(prefix + "max_pods_per_node"); ok { + np.MaxPodsConstraint = &containerBeta.MaxPodsConstraint{ + MaxPodsPerNode: int64(v.(int)), + } + } + + if v, ok := d.GetOk(prefix + "management"); ok { + managementConfig := v.([]interface{})[0].(map[string]interface{}) + np.Management = &containerBeta.NodeManagement{} + + if v, ok := managementConfig["auto_repair"]; ok { + np.Management.AutoRepair = v.(bool) + } + + if v, ok := 
managementConfig["auto_upgrade"]; ok { + np.Management.AutoUpgrade = v.(bool) + } + } + + return np, nil +} + +func flattenNodePool(d *schema.ResourceData, config *Config, np *containerBeta.NodePool, prefix string) (map[string]interface{}, error) { + // Node pools don't expose the current node count in their API, so read the + // instance groups instead. They should all have the same size, but in case a resize + // failed or something else strange happened, we'll just use the average size. + size := 0 + for _, url := range np.InstanceGroupUrls { + // retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers) + matches := instanceGroupManagerURL.FindStringSubmatch(url) + if len(matches) < 4 { + return nil, fmt.Errorf("Error reading instance group manage URL '%q'", url) + } + igm, err := config.clientComputeBeta.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() + if err != nil { + return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %q", err) + } + size += int(igm.TargetSize) + } + nodePool := map[string]interface{}{ + "name": np.Name, + "name_prefix": d.Get(prefix + "name_prefix"), + "initial_node_count": np.InitialNodeCount, + "node_count": size / len(np.InstanceGroupUrls), + "node_config": flattenNodeConfig(np.Config), + "instance_group_urls": np.InstanceGroupUrls, + "version": np.Version, + } + + if np.Autoscaling != nil && np.Autoscaling.Enabled { + nodePool["autoscaling"] = []map[string]interface{}{ + map[string]interface{}{ + "min_node_count": np.Autoscaling.MinNodeCount, + "max_node_count": np.Autoscaling.MaxNodeCount, + }, + } + } + + if np.MaxPodsConstraint != nil { + nodePool["max_pods_per_node"] = np.MaxPodsConstraint.MaxPodsPerNode + } + + nodePool["management"] = []map[string]interface{}{ + { + "auto_repair": np.Management.AutoRepair, + "auto_upgrade": np.Management.AutoUpgrade, + }, + } + + return nodePool, nil +} + +func nodePoolUpdate(d 
*schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation, prefix string, timeoutInMinutes int) error { + config := meta.(*Config) + + name := d.Get(prefix + "name").(string) + + lockKey := nodePoolInfo.lockKey() + + if d.HasChange(prefix + "autoscaling") { + update := &containerBeta.ClusterUpdate{ + DesiredNodePoolId: name, + } + if v, ok := d.GetOk(prefix + "autoscaling"); ok { + autoscaling := v.([]interface{})[0].(map[string]interface{}) + update.DesiredNodePoolAutoscaling = &containerBeta.NodePoolAutoscaling{ + Enabled: true, + MinNodeCount: int64(autoscaling["min_node_count"].(int)), + MaxNodeCount: int64(autoscaling["max_node_count"].(int)), + ForceSendFields: []string{"MinNodeCount"}, + } + } else { + update.DesiredNodePoolAutoscaling = &containerBeta.NodePoolAutoscaling{ + Enabled: false, + } + } + + req := &containerBeta.UpdateClusterRequest{ + Update: update, + } + + updateF := func() error { + op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req).Do() + if err != nil { + return err + } + + // Wait until it's updated + return containerBetaOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool", + timeoutInMinutes, 2) + } + + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id()) + + if prefix == "" { + d.SetPartial("autoscaling") + } + } + + if d.HasChange(prefix + "node_config") { + if d.HasChange(prefix + "node_config.0.image_type") { + req := &containerBeta.UpdateClusterRequest{ + Update: &containerBeta.ClusterUpdate{ + DesiredNodePoolId: name, + DesiredImageType: d.Get(prefix + "node_config.0.image_type").(string), + }, + } + + updateF := func() error { + op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req).Do() + if err != nil { + return err + } + + // Wait until it's updated + return containerBetaOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool", + timeoutInMinutes, 2) + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) + } + + if prefix == "" { + d.SetPartial("node_config") + } + } + + if d.HasChange(prefix + "node_count") { + newSize := int64(d.Get(prefix + "node_count").(int)) + req := &containerBeta.SetNodePoolSizeRequest{ + NodeCount: newSize, + } + updateF := func() error { + op, err := config.clientContainerBeta.Projects.Locations.Clusters.NodePools.SetSize(nodePoolInfo.fullyQualifiedName(name), req).Do() + + if err != nil { + return err + } + + // Wait until it's updated + return containerBetaOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool size", + timeoutInMinutes, 2) + } + + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE node pool %s size has been updated to %d", name, newSize) + + if prefix == "" { + d.SetPartial("node_count") + } + } + + if d.HasChange(prefix + "management") { + management := &containerBeta.NodeManagement{} + if v, ok := d.GetOk(prefix + "management"); ok { + managementConfig := v.([]interface{})[0].(map[string]interface{}) + management.AutoRepair = managementConfig["auto_repair"].(bool) + management.AutoUpgrade = managementConfig["auto_upgrade"].(bool) + management.ForceSendFields = []string{"AutoRepair", "AutoUpgrade"} + } + req := &containerBeta.SetNodePoolManagementRequest{ + Management: management, + } + + updateF := func() error { + op, err := config.clientContainerBeta.Projects.Locations. + Clusters.NodePools.SetManagement(nodePoolInfo.fullyQualifiedName(name), req).Do() + + if err != nil { + return err + } + + // Wait until it's updated + return containerBetaOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool management", timeoutInMinutes, 2) + } + + // Call update serially. + if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated management in Node Pool %s", name) + + if prefix == "" { + d.SetPartial("management") + } + } + + if d.HasChange(prefix + "version") { + req := &containerBeta.UpdateNodePoolRequest{ + NodePoolId: name, + NodeVersion: d.Get("version").(string), + } + updateF := func() error { + op, err := config.clientContainerBeta.Projects. + Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req).Do() + + if err != nil { + return err + } + + // Wait until it's updated + return containerBetaOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool version", timeoutInMinutes, 2) + } + + // Call update serially. 
+ if err := lockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated version in Node Pool %s", name) + + if prefix == "" { + d.SetPartial("version") + } + } + + return nil +} + +func getNodePoolName(id string) string { + // name can be specified with name, name_prefix, or neither, so read it from the id. + return strings.Split(id, "/")[2] +} diff --git a/provider/terraform/resources/resource_container_node_pool_migrate.go b/provider/terraform/resources/resource_container_node_pool_migrate.go new file mode 100644 index 000000000000..92850b672341 --- /dev/null +++ b/provider/terraform/resources/resource_container_node_pool_migrate.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/terraform" + "log" +) + +func resourceContainerNodePoolMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Container Node Pool State v0; migrating to v1") + return migrateNodePoolStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateNodePoolStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + log.Printf("[DEBUG] ID before migration: %s", is.ID) + + is.ID = fmt.Sprintf("%s/%s/%s", is.Attributes["zone"], is.Attributes["cluster"], is.Attributes["name"]) + + log.Printf("[DEBUG] ID after migration: %s", is.ID) + return is, nil +} diff --git a/provider/terraform/resources/resource_dataflow_job.go b/provider/terraform/resources/resource_dataflow_job.go new file mode 100644 index 000000000000..11da9ebb9172 --- /dev/null +++ b/provider/terraform/resources/resource_dataflow_job.go @@ -0,0 +1,261 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + + "google.golang.org/api/dataflow/v1b3" + "google.golang.org/api/googleapi" +) + +var dataflowTerminalStatesMap = map[string]struct{}{ + "JOB_STATE_DONE": {}, + "JOB_STATE_FAILED": {}, + "JOB_STATE_CANCELLED": {}, + "JOB_STATE_UPDATED": {}, + "JOB_STATE_DRAINING": {}, + "JOB_STATE_DRAINED": {}, + "JOB_STATE_CANCELLING": {}, +} + +func resourceDataflowJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDataflowJobCreate, + Read: resourceDataflowJobRead, + Delete: resourceDataflowJobDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "template_gcs_path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "temp_gcs_location": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "max_workers": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + + "on_delete": &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false), + Optional: true, + Default: "drain", + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := getZone(d, config) + if err != nil { + return err + } + + 
region, err := getRegion(d, config) + if err != nil { + return err + } + + params := expandStringMap(d, "parameters") + + env := dataflow.RuntimeEnvironment{ + TempLocation: d.Get("temp_gcs_location").(string), + Zone: zone, + MaxWorkers: int64(d.Get("max_workers").(int)), + } + + request := dataflow.CreateJobFromTemplateRequest{ + JobName: d.Get("name").(string), + GcsPath: d.Get("template_gcs_path").(string), + Parameters: params, + Environment: &env, + } + + job, err := createJob(config, project, region, &request) + if err != nil { + return err + } + d.SetId(job.Id) + + return resourceDataflowJobRead(d, meta) +} + +func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + + job, err := getJob(config, project, region, id) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) + } + + d.Set("state", job.CurrentState) + d.Set("name", job.Name) + d.Set("project", project) + + if _, ok := dataflowTerminalStatesMap[job.CurrentState]; ok { + log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState) + d.SetId("") + return nil + } + d.SetId(job.Id) + + return nil +} + +func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + requestedState, err := mapOnDelete(d.Get("on_delete").(string)) + if err != nil { + return err + } + for _, ok := dataflowTerminalStatesMap[d.Get("state").(string)]; !ok; _, ok = dataflowTerminalStatesMap[d.Get("state").(string)] { + job := &dataflow.Job{ + RequestedState: requestedState, + } + + _, err = updateJob(config, project, region, 
id, job) + if err != nil { + if gerr, err_ok := err.(*googleapi.Error); !err_ok { + // If we have an error and it's not a google-specific error, we should go ahead and return. + return err + } else if err_ok && strings.Contains(gerr.Message, "not yet ready for canceling") { + // We'll sleep below to wait for the job to be ready to cancel. + } else { + return err + } + } + + err = resourceDataflowJobRead(d, meta) + postReadState := d.Get("state").(string) + log.Printf("[DEBUG] Job state: '%s'.", postReadState) + if _, ok := dataflowTerminalStatesMap[postReadState]; !ok { + // If we're not yet in a terminal state, we need to sleep a few seconds so we don't + // exhaust our update quota with repeated attempts. + time.Sleep(5 * time.Second) + } + if err != nil { + return err + } + } + + // Only remove the job from state if it's actually successfully canceled. + if _, ok := dataflowTerminalStatesMap[d.Get("state").(string)]; ok { + d.SetId("") + return nil + } + + return fmt.Errorf("There was a problem canceling the dataflow job '%s' - the final state was %s.", d.Id(), d.Get("state").(string)) + +} + +func mapOnDelete(policy string) (string, error) { + switch policy { + case "cancel": + return "JOB_STATE_CANCELLED", nil + case "drain": + return "JOB_STATE_DRAINING", nil + default: + return "", fmt.Errorf("Invalid `on_delete` policy: %s", policy) + } +} + +func createJob(config *Config, project string, region string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) { + if region == "" { + return config.clientDataflow.Projects.Templates.Create(project, request).Do() + } + return config.clientDataflow.Projects.Locations.Templates.Create(project, region, request).Do() +} + +func getJob(config *Config, project string, region string, id string) (*dataflow.Job, error) { + if region == "" { + return config.clientDataflow.Projects.Jobs.Get(project, id).Do() + } + return config.clientDataflow.Projects.Locations.Jobs.Get(project, region, id).Do() +} + +func 
updateJob(config *Config, project string, region string, id string, job *dataflow.Job) (*dataflow.Job, error) { + if region == "" { + return config.clientDataflow.Projects.Jobs.Update(project, id, job).Do() + } + return config.clientDataflow.Projects.Locations.Jobs.Update(project, region, id, job).Do() +} diff --git a/provider/terraform/resources/resource_dataproc_cluster.go b/provider/terraform/resources/resource_dataproc_cluster.go new file mode 100644 index 000000000000..d3bccffc7757 --- /dev/null +++ b/provider/terraform/resources/resource_dataproc_cluster.go @@ -0,0 +1,879 @@ +package google + +import ( + "errors" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + + "google.golang.org/api/dataproc/v1" +) + +func resourceDataprocCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocClusterCreate, + Read: resourceDataprocClusterRead, + Update: resourceDataprocClusterUpdate, + Delete: resourceDataprocClusterDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 55 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 55 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, 
fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Default: "global", + ForceNew: true, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + // GCP automatically adds two labels + // 'goog-dataproc-cluster-uuid' + // 'goog-dataproc-cluster-name' + Computed: true, + }, + + "cluster_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "delete_autogen_bucket": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Removed: "If you need a bucket that can be deleted, please create" + + "a new one and set the `staging_bucket` field", + }, + + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + // If the user does not specify a staging bucket, GCP will allocate one automatically. + // The staging_bucket field provides a way for the user to supply their own + // staging bucket. 
The bucket field is purely a computed field which details + // the definitive bucket allocated and in use (either the user supplied one via + // staging_bucket, or the GCP generated one) + "bucket": { + Type: schema.TypeString, + Computed: true, + }, + + "gce_cluster_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"}, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"}, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "service_account_scopes": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + Set: stringScopeHashcode, + }, + + "internal_ip_only": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + }, + }, + }, + }, + + "master_config": instanceConfigSchema(), + "worker_config": instanceConfigSchema(), + // preemptible_worker_config has a slightly different config + "preemptible_worker_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + 
MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "num_instances": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + // API does not honour this if set ... + // It always uses whatever is specified for the worker_config + // "machine_type": { ... } + + "disk_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + // API does not honour this if set ... + // It simply ignores it completely + // "num_local_ssds": { ... } + + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(10), + }, + }, + }, + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "software_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "override_properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Computed: true, + }, + + // We have two versions of the properties field here because by default + // dataproc will set a number of default properties for you out of the + // box. If you want to override one or more, if we only had one field, + // you would need to add in all these values as well otherwise you would + // get a diff. To make this easier, 'properties' simply contains the computed + // values (including overrides) for all properties, whilst override_properties + // is only for properties the user specifically wants to override. If nothing + // is overriden, this will be empty. 
+ }, + }, + }, + + "initialization_action": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "script": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "timeout_sec": { + Type: schema.TypeInt, + Optional: true, + Default: 300, + ForceNew: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func instanceConfigSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "num_instances": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "disk_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "num_local_ssds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(10), + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), + Default: "pd-standard", + }, + }, + }, + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + } +} + +func resourceDataprocClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + cluster := &dataproc.Cluster{ + ClusterName: d.Get("name").(string), + ProjectId: project, + } + + cluster.Config, err = expandClusterConfig(d, config) + if err != nil { + return err + 
} + + if _, ok := d.GetOk("labels"); ok { + cluster.Labels = expandLabels(d) + } + + // Checking here caters for the case where the user does not specify cluster_config + // at all, as well where it is simply missing from the gce_cluster_config + if region == "global" && cluster.Config.GceClusterConfig.ZoneUri == "" { + return errors.New("zone is mandatory when region is set to 'global'") + } + + // Create the cluster + op, err := config.clientDataproc.Projects.Regions.Clusters.Create( + project, region, cluster).Do() + if err != nil { + return fmt.Errorf("Error creating Dataproc cluster: %s", err) + } + + d.SetId(cluster.ClusterName) + + // Wait until it's created + timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes()) + waitErr := dataprocClusterOperationWait(config, op, "creating Dataproc cluster", timeoutInMinutes, 3) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] Dataproc cluster %s has been created", cluster.ClusterName) + return resourceDataprocClusterRead(d, meta) + +} + +func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.ClusterConfig, error) { + conf := &dataproc.ClusterConfig{ + // SDK requires GceClusterConfig to be specified, + // even if no explicit values specified + GceClusterConfig: &dataproc.GceClusterConfig{}, + } + + if v, ok := d.GetOk("cluster_config"); ok { + confs := v.([]interface{}) + if (len(confs)) == 0 { + return conf, nil + } + } + + if v, ok := d.GetOk("cluster_config.0.staging_bucket"); ok { + conf.ConfigBucket = v.(string) + } + + c, err := expandGceClusterConfig(d, config) + if err != nil { + return nil, err + } + conf.GceClusterConfig = c + + if cfg, ok := configOptions(d, "cluster_config.0.software_config"); ok { + conf.SoftwareConfig = expandSoftwareConfig(cfg) + } + + if v, ok := d.GetOk("cluster_config.0.initialization_action"); ok { + conf.InitializationActions = expandInitializationActions(v) + } + + if cfg, ok := 
configOptions(d, "cluster_config.0.master_config"); ok { + log.Println("[INFO] got master_config") + conf.MasterConfig = expandInstanceGroupConfig(cfg) + } + + if cfg, ok := configOptions(d, "cluster_config.0.worker_config"); ok { + log.Println("[INFO] got worker config") + conf.WorkerConfig = expandInstanceGroupConfig(cfg) + } + + if cfg, ok := configOptions(d, "cluster_config.0.preemptible_worker_config"); ok { + log.Println("[INFO] got preemtible worker config") + conf.SecondaryWorkerConfig = expandPreemptibleInstanceGroupConfig(cfg) + if conf.SecondaryWorkerConfig.NumInstances > 0 { + conf.SecondaryWorkerConfig.IsPreemptible = true + } + } + return conf, nil +} + +func expandGceClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.GceClusterConfig, error) { + conf := &dataproc.GceClusterConfig{} + + v, ok := d.GetOk("cluster_config.0.gce_cluster_config") + if !ok { + return conf, nil + } + cfg := v.([]interface{})[0].(map[string]interface{}) + + if v, ok := cfg["zone"]; ok { + conf.ZoneUri = v.(string) + } + if v, ok := cfg["network"]; ok { + nf, err := ParseNetworkFieldValue(v.(string), d, config) + if err != nil { + return nil, fmt.Errorf("cannot determine self_link for network %q: %s", v, err) + } + + conf.NetworkUri = nf.RelativeLink() + } + if v, ok := cfg["subnetwork"]; ok { + snf, err := ParseSubnetworkFieldValue(v.(string), d, config) + if err != nil { + return nil, fmt.Errorf("cannot determine self_link for subnetwork %q: %s", v, err) + } + + conf.SubnetworkUri = snf.RelativeLink() + } + if v, ok := cfg["tags"]; ok { + conf.Tags = convertStringArr(v.([]interface{})) + } + if v, ok := cfg["service_account"]; ok { + conf.ServiceAccount = v.(string) + } + if scopes, ok := cfg["service_account_scopes"]; ok { + scopesSet := scopes.(*schema.Set) + scopes := make([]string, scopesSet.Len()) + for i, scope := range scopesSet.List() { + scopes[i] = canonicalizeServiceScope(scope.(string)) + } + conf.ServiceAccountScopes = scopes + } + if v, ok := 
cfg["internal_ip_only"]; ok { + conf.InternalIpOnly = v.(bool) + } + if v, ok := cfg["metadata"]; ok { + conf.Metadata = convertStringMap(v.(map[string]interface{})) + } + return conf, nil +} + +func expandSoftwareConfig(cfg map[string]interface{}) *dataproc.SoftwareConfig { + conf := &dataproc.SoftwareConfig{} + if v, ok := cfg["override_properties"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + conf.Properties = m + } + if v, ok := cfg["image_version"]; ok { + conf.ImageVersion = v.(string) + } + return conf +} + +func expandInitializationActions(v interface{}) []*dataproc.NodeInitializationAction { + actionList := v.([]interface{}) + + actions := []*dataproc.NodeInitializationAction{} + for _, v1 := range actionList { + actionItem := v1.(map[string]interface{}) + action := &dataproc.NodeInitializationAction{ + ExecutableFile: actionItem["script"].(string), + } + if x, ok := actionItem["timeout_sec"]; ok { + action.ExecutionTimeout = strconv.Itoa(x.(int)) + "s" + } + actions = append(actions, action) + } + + return actions +} + +func expandPreemptibleInstanceGroupConfig(cfg map[string]interface{}) *dataproc.InstanceGroupConfig { + icg := &dataproc.InstanceGroupConfig{} + + if v, ok := cfg["num_instances"]; ok { + icg.NumInstances = int64(v.(int)) + } + if dc, ok := cfg["disk_config"]; ok { + d := dc.([]interface{}) + if len(d) > 0 { + dcfg := d[0].(map[string]interface{}) + icg.DiskConfig = &dataproc.DiskConfig{} + + if v, ok := dcfg["boot_disk_size_gb"]; ok { + icg.DiskConfig.BootDiskSizeGb = int64(v.(int)) + } + } + } + return icg +} + +func expandInstanceGroupConfig(cfg map[string]interface{}) *dataproc.InstanceGroupConfig { + icg := &dataproc.InstanceGroupConfig{} + + if v, ok := cfg["num_instances"]; ok { + icg.NumInstances = int64(v.(int)) + } + if v, ok := cfg["machine_type"]; ok { + icg.MachineTypeUri = GetResourceNameFromSelfLink(v.(string)) + } + + if dc, ok := cfg["disk_config"]; ok 
{ + d := dc.([]interface{}) + if len(d) > 0 { + dcfg := d[0].(map[string]interface{}) + icg.DiskConfig = &dataproc.DiskConfig{} + + if v, ok := dcfg["boot_disk_size_gb"]; ok { + icg.DiskConfig.BootDiskSizeGb = int64(v.(int)) + } + if v, ok := dcfg["num_local_ssds"]; ok { + icg.DiskConfig.NumLocalSsds = int64(v.(int)) + } + if v, ok := dcfg["boot_disk_type"]; ok { + icg.DiskConfig.BootDiskType = v.(string) + } + } + } + return icg +} + +func resourceDataprocClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + clusterName := d.Get("name").(string) + timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes()) + + cluster := &dataproc.Cluster{ + ClusterName: clusterName, + ProjectId: project, + Config: &dataproc.ClusterConfig{}, + } + + updMask := []string{} + + if d.HasChange("labels") { + v := d.Get("labels") + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + cluster.Labels = m + + updMask = append(updMask, "labels") + } + + if d.HasChange("cluster_config.0.worker_config.0.num_instances") { + desiredNumWorks := d.Get("cluster_config.0.worker_config.0.num_instances").(int) + cluster.Config.WorkerConfig = &dataproc.InstanceGroupConfig{ + NumInstances: int64(desiredNumWorks), + } + + updMask = append(updMask, "config.worker_config.num_instances") + } + + if d.HasChange("cluster_config.0.preemptible_worker_config.0.num_instances") { + desiredNumWorks := d.Get("cluster_config.0.preemptible_worker_config.0.num_instances").(int) + cluster.Config.SecondaryWorkerConfig = &dataproc.InstanceGroupConfig{ + NumInstances: int64(desiredNumWorks), + } + + updMask = append(updMask, "config.secondary_worker_config.num_instances") + } + + if len(updMask) > 0 { + patch := config.clientDataproc.Projects.Regions.Clusters.Patch( + project, region, clusterName, cluster) + 
op, err := patch.UpdateMask(strings.Join(updMask, ",")).Do() + if err != nil { + return err + } + + // Wait until it's updated + waitErr := dataprocClusterOperationWait(config, op, "updating Dataproc cluster ", timeoutInMinutes, 2) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] Dataproc cluster %s has been updated ", d.Id()) + } + + return resourceDataprocClusterRead(d, meta) +} + +func resourceDataprocClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + clusterName := d.Get("name").(string) + + cluster, err := config.clientDataproc.Projects.Regions.Clusters.Get( + project, region, clusterName).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Cluster %q", clusterName)) + } + + d.Set("name", cluster.ClusterName) + d.Set("project", project) + d.Set("region", region) + d.Set("labels", cluster.Labels) + + cfg, err := flattenClusterConfig(d, cluster.Config) + if err != nil { + return err + } + + err = d.Set("cluster_config", cfg) + if err != nil { + return err + } + return nil +} + +func flattenClusterConfig(d *schema.ResourceData, cfg *dataproc.ClusterConfig) ([]map[string]interface{}, error) { + + data := map[string]interface{}{ + "staging_bucket": d.Get("cluster_config.0.staging_bucket").(string), + + "bucket": cfg.ConfigBucket, + "gce_cluster_config": flattenGceClusterConfig(d, cfg.GceClusterConfig), + "software_config": flattenSoftwareConfig(d, cfg.SoftwareConfig), + "master_config": flattenInstanceGroupConfig(d, cfg.MasterConfig), + "worker_config": flattenInstanceGroupConfig(d, cfg.WorkerConfig), + "preemptible_worker_config": flattenPreemptibleInstanceGroupConfig(d, cfg.SecondaryWorkerConfig), + } + + if len(cfg.InitializationActions) > 0 { + val, err := flattenInitializationActions(cfg.InitializationActions) + if err != nil { + return nil, err + } + 
data["initialization_action"] = val + } + return []map[string]interface{}{data}, nil +} + +func flattenSoftwareConfig(d *schema.ResourceData, sc *dataproc.SoftwareConfig) []map[string]interface{} { + data := map[string]interface{}{ + "image_version": sc.ImageVersion, + "properties": sc.Properties, + "override_properties": d.Get("cluster_config.0.software_config.0.override_properties").(map[string]interface{}), + } + + return []map[string]interface{}{data} +} + +func flattenInitializationActions(nia []*dataproc.NodeInitializationAction) ([]map[string]interface{}, error) { + + actions := []map[string]interface{}{} + for _, v := range nia { + action := map[string]interface{}{ + "script": v.ExecutableFile, + } + if len(v.ExecutionTimeout) > 0 { + tsec, err := extractInitTimeout(v.ExecutionTimeout) + if err != nil { + return nil, err + } + action["timeout_sec"] = tsec + } + + actions = append(actions, action) + } + return actions, nil + +} + +func flattenGceClusterConfig(d *schema.ResourceData, gcc *dataproc.GceClusterConfig) []map[string]interface{} { + + gceConfig := map[string]interface{}{ + "tags": gcc.Tags, + "service_account": gcc.ServiceAccount, + "zone": GetResourceNameFromSelfLink(gcc.ZoneUri), + "internal_ip_only": gcc.InternalIpOnly, + "metadata": gcc.Metadata, + } + + if gcc.NetworkUri != "" { + gceConfig["network"] = gcc.NetworkUri + } + if gcc.SubnetworkUri != "" { + gceConfig["subnetwork"] = gcc.SubnetworkUri + } + if len(gcc.ServiceAccountScopes) > 0 { + gceConfig["service_account_scopes"] = schema.NewSet(stringScopeHashcode, convertStringArrToInterface(gcc.ServiceAccountScopes)) + } + + return []map[string]interface{}{gceConfig} +} + +func flattenPreemptibleInstanceGroupConfig(d *schema.ResourceData, icg *dataproc.InstanceGroupConfig) []map[string]interface{} { + disk := map[string]interface{}{} + data := map[string]interface{}{} + + if icg != nil { + data["num_instances"] = icg.NumInstances + data["instance_names"] = icg.InstanceNames + if 
// extractInitTimeout converts an API duration string such as "300s" into a
// whole number of seconds, returning the parse error verbatim on bad input.
func extractInitTimeout(t string) (int, error) {
	dur, parseErr := time.ParseDuration(t)
	if parseErr != nil {
		return 0, parseErr
	}
	secs := dur.Seconds()
	return int(secs), nil
}
v.([]interface{}) + if len(clist) == 0 { + return nil, false + } + + if clist[0] != nil { + return clist[0].(map[string]interface{}), true + } + } + return nil, false +} diff --git a/provider/terraform/resources/resource_dataproc_job.go b/provider/terraform/resources/resource_dataproc_job.go new file mode 100644 index 000000000000..6a5ef3470049 --- /dev/null +++ b/provider/terraform/resources/resource_dataproc_job.go @@ -0,0 +1,1050 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/dataproc/v1" +) + +func resourceDataprocJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocJobCreate, + Update: resourceDataprocJobUpdate, + Read: resourceDataprocJobRead, + Delete: resourceDataprocJobDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + // Ref: https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs#JobReference + "region": { + Type: schema.TypeString, + Optional: true, + Default: "global", + ForceNew: true, + }, + + // If a job is still running, trying to delete a job will fail. Setting + // this flag to true however will force the deletion by first cancelling + // the job and then deleting it + "force_delete": { + Type: schema.TypeBool, + Default: false, + Optional: true, + }, + + "reference": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Description: "The job ID, which must be unique within the project. 
The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs", + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: validateRegexp("^[a-zA-Z0-9_-]{1,100}$"), + }, + }, + }, + }, + + "placement": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Description: "The name of the cluster where the job will be submitted", + Required: true, + ForceNew: true, + }, + "cluster_uuid": { + Type: schema.TypeString, + Computed: true, + Description: "Output-only. A cluster UUID generated by the Cloud Dataproc service when the job is submitted", + }, + }, + }, + }, + + "status": { + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Description: "Output-only. A state message specifying the overall job state", + Computed: true, + }, + "details": { + Type: schema.TypeString, + Description: "Output-only. Optional job state details, such as an error description if the state is ERROR", + Computed: true, + }, + "state_start_time": { + Type: schema.TypeString, + Description: "Output-only. The time when this state was entered", + Computed: true, + }, + "substate": { + Type: schema.TypeString, + Description: "Output-only. Additional state information, which includes status reported by the agent", + Computed: true, + }, + }, + }, + }, + + "driver_output_resource_uri": { + Type: schema.TypeString, + Description: "Output-only. A URI pointing to the location of the stdout of the job's driver program", + Computed: true, + }, + + "driver_controls_files_uri": { + Type: schema.TypeString, + Description: "Output-only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. 
If not present, control files may be placed in the same location as driver_output_uri.", + Computed: true, + }, + + "labels": { + Type: schema.TypeMap, + Description: "Optional. The labels to associate with this job.", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "scheduling": { + Type: schema.TypeList, + Description: "Optional. Job scheduling configuration.", + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_failures_per_hour": { + Type: schema.TypeInt, + Description: "Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.", + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtMost(10), + }, + }, + }, + }, + + "pyspark_config": pySparkSchema, + "spark_config": sparkSchema, + "hadoop_config": hadoopSchema, + "hive_config": hiveSchema, + "pig_config": pigSchema, + "sparksql_config": sparkSqlSchema, + }, + } +} + +func resourceDataprocJobUpdate(d *schema.ResourceData, meta interface{}) error { + // The only updatable value is currently 'force_delete' which is a local + // only value therefore we don't need to make any GCP calls to update this. 
+ + return resourceDataprocJobRead(d, meta) +} + +func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + jobConfCount := 0 + clusterName := d.Get("placement.0.cluster_name").(string) + region := d.Get("region").(string) + + submitReq := &dataproc.SubmitJobRequest{ + Job: &dataproc.Job{ + Placement: &dataproc.JobPlacement{ + ClusterName: clusterName, + }, + Reference: &dataproc.JobReference{ + ProjectId: project, + }, + }, + } + + if v, ok := d.GetOk("reference.0.job_id"); ok { + submitReq.Job.Reference.JobId = v.(string) + } + if _, ok := d.GetOk("labels"); ok { + submitReq.Job.Labels = expandLabels(d) + } + + if v, ok := d.GetOk("pyspark_config"); ok { + jobConfCount++ + config := extractFirstMapConfig(v.([]interface{})) + submitReq.Job.PysparkJob = expandPySparkJob(config) + } + + if v, ok := d.GetOk("spark_config"); ok { + jobConfCount++ + config := extractFirstMapConfig(v.([]interface{})) + submitReq.Job.SparkJob = expandSparkJob(config) + } + + if v, ok := d.GetOk("hadoop_config"); ok { + jobConfCount++ + config := extractFirstMapConfig(v.([]interface{})) + submitReq.Job.HadoopJob = expandHadoopJob(config) + } + + if v, ok := d.GetOk("hive_config"); ok { + jobConfCount++ + config := extractFirstMapConfig(v.([]interface{})) + submitReq.Job.HiveJob = expandHiveJob(config) + } + + if v, ok := d.GetOk("pig_config"); ok { + jobConfCount++ + config := extractFirstMapConfig(v.([]interface{})) + submitReq.Job.PigJob = expandPigJob(config) + } + + if v, ok := d.GetOk("sparksql_config"); ok { + jobConfCount++ + config := extractFirstMapConfig(v.([]interface{})) + submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) + } + + if jobConfCount != 1 { + return fmt.Errorf("You must define and configure exactly one xxx_config block") + } + + // Submit the job + job, err := config.clientDataproc.Projects.Regions.Jobs.Submit( + project, region, 
submitReq).Do() + if err != nil { + return err + } + d.SetId(job.Reference.JobId) + + log.Printf("[INFO] Dataproc job %s has been submitted", job.Reference.JobId) + return resourceDataprocJobRead(d, meta) +} + +func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + region := d.Get("region").(string) + + project, err := getProject(d, config) + if err != nil { + return err + } + + job, err := config.clientDataproc.Projects.Regions.Jobs.Get( + project, region, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", d.Id())) + } + + d.Set("force_delete", d.Get("force_delete")) + d.Set("labels", job.Labels) + d.Set("driver_output_resource_uri", job.DriverOutputResourceUri) + d.Set("driver_controls_files_uri", job.DriverControlFilesUri) + + d.Set("placement", flattenJobPlacement(job.Placement)) + d.Set("status", flattenJobStatus(job.Status)) + d.Set("reference", flattenJobReference(job.Reference)) + d.Set("project", project) + + if job.PysparkJob != nil { + d.Set("pyspark_config", flattenPySparkJob(job.PysparkJob)) + } + if job.SparkJob != nil { + d.Set("spark_config", flattenSparkJob(job.SparkJob)) + } + if job.HadoopJob != nil { + d.Set("hadoop_config", flattenHadoopJob(job.HadoopJob)) + } + if job.HiveJob != nil { + d.Set("hive_config", flattenHiveJob(job.HiveJob)) + } + if job.PigJob != nil { + d.Set("pig_config", flattenPigJob(job.PigJob)) + } + if job.SparkSqlJob != nil { + d.Set("sparksql_config", flattenSparkSqlJob(job.SparkSqlJob)) + } + return nil +} + +func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + forceDelete := d.Get("force_delete").(bool) + timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes()) + + if forceDelete { + log.Printf("[DEBUG] Attempting to first cancel Dataproc job 
%s if it's still running ...", d.Id()) + + config.clientDataproc.Projects.Regions.Jobs.Cancel( + project, region, d.Id(), &dataproc.CancelJobRequest{}).Do() + // ignore error if we get one - job may be finished already and not need to + // be cancelled. We do however wait for the state to be one that is + // at least not active + waitErr := dataprocJobOperationWait(config, region, project, d.Id(), + "Cancelling Dataproc job", timeoutInMinutes, 1) + if waitErr != nil { + return waitErr + } + + } + + log.Printf("[DEBUG] Deleting Dataproc job %s", d.Id()) + _, err = config.clientDataproc.Projects.Regions.Jobs.Delete( + project, region, d.Id()).Do() + if err != nil { + return err + } + + waitErr := dataprocDeleteOperationWait(config, region, project, d.Id(), + "Deleting Dataproc job", timeoutInMinutes, 1) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] Dataproc job %s has been deleted", d.Id()) + d.SetId("") + + return nil +} + +// ---- PySpark Job ---- + +var loggingConfig = &schema.Schema{ + Type: schema.TypeList, + Description: "The runtime logging config of the job", + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Description: "Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'.", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, +} + +var pySparkSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_python_file_uri": { + Type: schema.TypeString, + Description: "Required. The HCFS URI of the main Python file to use as the driver. 
Must be a .py file", + Required: true, + ForceNew: true, + }, + + "args": { + Type: schema.TypeList, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "python_file_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "archive_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenPySparkJob(job *dataproc.PySparkJob) []map[string]interface{} { + return []map[string]interface{}{ + { + "main_python_file_uri": job.MainPythonFileUri, + "args": job.Args, + "python_file_uris": job.PythonFileUris, + "jar_file_uris": job.JarFileUris, + "file_uris": job.FileUris, + "archive_uris": job.ArchiveUris, + "properties": job.Properties, + "logging_config": flattenLoggingConfig(job.LoggingConfig), + }, + } +} + +func expandPySparkJob(config map[string]interface{}) *dataproc.PySparkJob { + job := &dataproc.PySparkJob{} + if v, ok := config["main_python_file_uri"]; ok { + job.MainPythonFileUri = v.(string) + } + if v, ok := config["args"]; ok { + job.Args = convertStringArr(v.([]interface{})) + } + if v, ok := config["python_file_uris"]; ok { + job.PythonFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["file_uris"]; ok { + job.FileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["archive_uris"]; ok { + job.ArchiveUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = convertStringMap(v.(map[string]interface{})) + } + if v, ok := config["logging_config"]; ok { + config := extractFirstMapConfig(v.([]interface{})) + job.LoggingConfig = expandLoggingConfig(config) + } + + return job + +} + +// ---- Spark Job ---- + +var sparkSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main driver: can be only one 
of the class | jar_file + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"spark_config.0.main_jar_file_uri"}, + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"spark_config.0.main_class"}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenSparkJob(job *dataproc.SparkJob) []map[string]interface{} { + return []map[string]interface{}{ + { + "main_class": job.MainClass, + "main_jar_file_uri": job.MainJarFileUri, + "args": job.Args, + "jar_file_uris": job.JarFileUris, + "file_uris": job.FileUris, + "archive_uris": job.ArchiveUris, + "properties": job.Properties, + "logging_config": flattenLoggingConfig(job.LoggingConfig), + }, + } +} + +func expandSparkJob(config map[string]interface{}) *dataproc.SparkJob { + job := &dataproc.SparkJob{} + if v, ok := config["main_class"]; ok { + job.MainClass = v.(string) + } + if v, ok := config["main_jar_file_uri"]; ok { + job.MainJarFileUri = v.(string) + } + + if v, ok := config["args"]; ok { + job.Args = convertStringArr(v.([]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["file_uris"]; ok { + job.FileUris = 
convertStringArr(v.([]interface{})) + } + if v, ok := config["archive_uris"]; ok { + job.ArchiveUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = convertStringMap(v.(map[string]interface{})) + } + if v, ok := config["logging_config"]; ok { + config := extractFirstMapConfig(v.([]interface{})) + job.LoggingConfig = expandLoggingConfig(config) + } + + return job + +} + +// ---- Hadoop Job ---- + +var hadoopSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"spark_config", "pyspark_config", "hive_config", "pig_config", "sparksql_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main driver: can be only one of the main_class | main_jar_file_uri + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"hadoop_config.0.main_jar_file_uri"}, + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"hadoop_config.0.main_class"}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenHadoopJob(job *dataproc.HadoopJob) []map[string]interface{} { + return []map[string]interface{}{ + { + "main_class": job.MainClass, + 
"main_jar_file_uri": job.MainJarFileUri, + "args": job.Args, + "jar_file_uris": job.JarFileUris, + "file_uris": job.FileUris, + "archive_uris": job.ArchiveUris, + "properties": job.Properties, + "logging_config": flattenLoggingConfig(job.LoggingConfig), + }, + } +} + +func expandHadoopJob(config map[string]interface{}) *dataproc.HadoopJob { + job := &dataproc.HadoopJob{} + if v, ok := config["main_class"]; ok { + job.MainClass = v.(string) + } + if v, ok := config["main_jar_file_uri"]; ok { + job.MainJarFileUri = v.(string) + } + + if v, ok := config["args"]; ok { + job.Args = convertStringArr(v.([]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["file_uris"]; ok { + job.FileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["archive_uris"]; ok { + job.ArchiveUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = convertStringMap(v.(map[string]interface{})) + } + if v, ok := config["logging_config"]; ok { + config := extractFirstMapConfig(v.([]interface{})) + job.LoggingConfig = expandLoggingConfig(config) + } + + return job + +} + +// ---- Hive Job ---- + +var hiveSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "pig_config", "sparksql_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main query: can be only one of query_list | query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"hive_config.0.query_file_uri"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"hive_config.0.query_list"}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + 
ForceNew: true, + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, +} + +func flattenHiveJob(job *dataproc.HiveJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "continue_on_failure": job.ContinueOnFailure, + "script_variables": job.ScriptVariables, + "properties": job.Properties, + "jar_file_uris": job.JarFileUris, + }, + } +} + +func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob { + job := &dataproc.HiveJob{} + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: convertStringArr(v.([]interface{})), + } + } + if v, ok := config["continue_on_failure"]; ok { + job.ContinueOnFailure = v.(bool) + } + if v, ok := config["script_variables"]; ok { + job.ScriptVariables = convertStringMap(v.(map[string]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = convertStringMap(v.(map[string]interface{})) + } + + return job +} + +// ---- Pig Job ---- + +var pigSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "sparksql_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main query: can be only one of query_list | 
query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"pig_config.0.query_file_uri"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"pig_config.0.query_list"}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenPigJob(job *dataproc.PigJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "continue_on_failure": job.ContinueOnFailure, + "script_variables": job.ScriptVariables, + "properties": job.Properties, + "jar_file_uris": job.JarFileUris, + }, + } +} + +func expandPigJob(config map[string]interface{}) *dataproc.PigJob { + job := &dataproc.PigJob{} + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: convertStringArr(v.([]interface{})), + } + } + if v, ok := config["continue_on_failure"]; ok { + job.ContinueOnFailure = v.(bool) + } + if v, ok := config["script_variables"]; ok { + job.ScriptVariables = convertStringMap(v.(map[string]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := 
config["properties"]; ok { + job.Properties = convertStringMap(v.(map[string]interface{})) + } + + return job + +} + +// ---- Spark SQL Job ---- + +var sparkSqlSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + ConflictsWith: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main query: can be only one of query_list | query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"pig_config.0.query_file_uri"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"pig_config.0.query_list"}, + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenSparkSqlJob(job *dataproc.SparkSqlJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "script_variables": job.ScriptVariables, + "properties": job.Properties, + "jar_file_uris": job.JarFileUris, + }, + } +} + +func expandSparkSqlJob(config map[string]interface{}) *dataproc.SparkSqlJob { + job := &dataproc.SparkSqlJob{} + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: 
convertStringArr(v.([]interface{})), + } + } + if v, ok := config["script_variables"]; ok { + job.ScriptVariables = convertStringMap(v.(map[string]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = convertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = convertStringMap(v.(map[string]interface{})) + } + + return job + +} + +// ---- Other flatten / expand methods ---- + +func expandLoggingConfig(config map[string]interface{}) *dataproc.LoggingConfig { + conf := &dataproc.LoggingConfig{} + if v, ok := config["driver_log_levels"]; ok { + conf.DriverLogLevels = convertStringMap(v.(map[string]interface{})) + } + return conf +} + +func flattenLoggingConfig(l *dataproc.LoggingConfig) []map[string]interface{} { + return []map[string]interface{}{ + { + "driver_log_levels": l.DriverLogLevels, + }, + } +} + +func flattenJobReference(r *dataproc.JobReference) []map[string]interface{} { + return []map[string]interface{}{ + { + "job_id": r.JobId, + }, + } +} + +func flattenJobStatus(s *dataproc.JobStatus) []map[string]interface{} { + return []map[string]interface{}{ + { + "state": s.State, + "details": s.Details, + "state_start_time": s.StateStartTime, + "substate": s.Substate, + }, + } +} + +func flattenJobPlacement(jp *dataproc.JobPlacement) []map[string]interface{} { + return []map[string]interface{}{ + { + "cluster_name": jp.ClusterName, + "cluster_uuid": jp.ClusterUuid, + }, + } +} diff --git a/provider/terraform/resources/resource_dns_managed_zone.go b/provider/terraform/resources/resource_dns_managed_zone.go new file mode 100644 index 000000000000..175facd83779 --- /dev/null +++ b/provider/terraform/resources/resource_dns_managed_zone.go @@ -0,0 +1,182 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/dns/v1" +) + +func resourceDnsManagedZone() *schema.Resource { + return &schema.Resource{ + Create: 
resourceDnsManagedZoneCreate, + Read: resourceDnsManagedZoneRead, + Update: resourceDnsManagedZoneUpdate, + Delete: resourceDnsManagedZoneDelete, + Importer: &schema.ResourceImporter{ + State: resourceDnsManagedZoneImport, + }, + Schema: map[string]*schema.Schema{ + "dns_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "Managed by Terraform", + }, + + "name_servers": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + zone := &dns.ManagedZone{ + Name: d.Get("name").(string), + DnsName: d.Get("dns_name").(string), + Description: d.Get("description").(string), + } + + if _, ok := d.GetOk("labels"); ok { + zone.Labels = expandLabels(d) + } + + log.Printf("[DEBUG] DNS ManagedZone create request: %#v", zone) + + zone, err = config.clientDns.ManagedZones.Create(project, zone).Do() + if err != nil { + return fmt.Errorf("Error creating DNS ManagedZone: %s", err) + } + + d.SetId(zone.Name) + + return resourceDnsManagedZoneRead(d, meta) +} + +func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err 
:= config.clientDns.ManagedZones.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DNS Managed Zone %q", d.Get("name").(string))) + } + + d.Set("name_servers", zone.NameServers) + d.Set("name", zone.Name) + d.Set("dns_name", zone.DnsName) + d.Set("description", zone.Description) + d.Set("project", project) + d.Set("labels", zone.Labels) + + return nil +} + +func resourceDnsManagedZoneUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := &dns.ManagedZone{ + Name: d.Get("name").(string), + DnsName: d.Get("dns_name").(string), + Description: d.Get("description").(string), + } + + if _, ok := d.GetOk("labels"); ok { + zone.Labels = expandLabels(d) + } + + op, err := config.clientDns.ManagedZones.Patch(project, d.Id(), zone).Do() + if err != nil { + return err + } + + err = dnsOperationWait(config.clientDns, op, project, "Updating DNS Managed Zone") + if err != nil { + return err + } + + return resourceDnsManagedZoneRead(d, meta) +} + +func resourceDnsManagedZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + err = config.clientDns.ManagedZones.Delete(project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting DNS ManagedZone: %s", err) + } + + d.SetId("") + return nil +} + +func resourceDnsManagedZoneImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/managedZones/(?P[^/]+)", + "(?P[^/]+)/managedZones/(?P[^/]+)", + "(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff 
--git a/provider/terraform/resources/resource_dns_record_set.go b/provider/terraform/resources/resource_dns_record_set.go new file mode 100644 index 000000000000..5bf360271590 --- /dev/null +++ b/provider/terraform/resources/resource_dns_record_set.go @@ -0,0 +1,336 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/dns/v1" + "net" +) + +func resourceDnsRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsRecordSetCreate, + Read: resourceDnsRecordSetRead, + Delete: resourceDnsRecordSetDelete, + Update: resourceDnsRecordSetUpdate, + Importer: &schema.ResourceImporter{ + State: resourceDnsRecordSetImportState, + }, + + Schema: map[string]*schema.Schema{ + "managed_zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "rrdatas": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if d.Get("type") == "AAAA" { + return ipv6AddressDiffSuppress(k, old, new, d) + } + return false + }, + }, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.ToLower(strings.Trim(old, `"`)) == strings.ToLower(strings.Trim(new, `"`)) + }, + }, + + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + zone := 
d.Get("managed_zone").(string) + rType := d.Get("type").(string) + + // Build the change + chg := &dns.Change{ + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: name, + Type: rType, + Ttl: int64(d.Get("ttl").(int)), + Rrdatas: rrdata(d), + }, + }, + } + + // we need to replace NS record sets in the same call. That means + // we need to list all the current NS record sets attached to the + // zone and add them to the change as deletions. We can't just add + // new NS record sets, or we'll get an error about the NS record set + // already existing; see terraform-providers/terraform-provider-google#95. + // We also can't just remove the NS recordsets on creation, as at + // least one is required. So the solution is to "update in place" by + // putting the addition and the removal in the same API call. + if rType == "NS" { + log.Printf("[DEBUG] DNS record list request for %q", zone) + res, err := config.clientDns.ResourceRecordSets.List(project, zone).Do() + if err != nil { + return fmt.Errorf("Error retrieving record sets for %q: %s", zone, err) + } + var deletions []*dns.ResourceRecordSet + + for _, record := range res.Rrsets { + if record.Type != "NS" || record.Name != name { + continue + } + deletions = append(deletions, record) + } + if len(deletions) > 0 { + chg.Deletions = deletions + } + } + + log.Printf("[DEBUG] DNS Record create request: %#v", chg) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error creating DNS RecordSet: %s", err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", zone, name, rType)) + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + _, err = w.Conf().WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + + // name and type are effectively the 'key' + name := d.Get("name").(string) + dnsType := d.Get("type").(string) + + resp, err := config.clientDns.ResourceRecordSets.List( + project, zone).Name(name).Type(dnsType).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) + } + if len(resp.Rrsets) == 0 { + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + if len(resp.Rrsets) > 1 { + return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) + } + + d.Set("type", resp.Rrsets[0].Type) + d.Set("ttl", resp.Rrsets[0].Ttl) + d.Set("rrdatas", resp.Rrsets[0].Rrdatas) + d.Set("project", project) + + return nil +} + +func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + + // NS records must always have a value, so we short-circuit delete + // this allows terraform delete to work, but may have unexpected + // side-effects when deleting just that record set. + // Unfortunately, you can set NS records on subdomains, and those + // CAN and MUST be deleted, so we need to retrieve the managed zone, + // check if what we're looking at is a subdomain, and only not delete + // if it's not actually a subdomain + if d.Get("type").(string) == "NS" { + mz, err := config.clientDns.ManagedZones.Get(project, zone).Do() + if err != nil { + return fmt.Errorf("Error retrieving managed zone %q from %q: %s", zone, project, err) + } + domain := mz.DnsName + + if domain == d.Get("name").(string) { + log.Println("[DEBUG] NS records can't be deleted due to API restrictions, so they're being left in place. 
See https://www.terraform.io/docs/providers/google/r/dns_record_set.html for more information.") + return nil + } + } + + // Build the change + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), + Rrdatas: rrdata(d), + }, + }, + } + + log.Printf("[DEBUG] DNS Record delete request: %#v", chg) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error deleting DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + _, err = w.Conf().WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + d.SetId("") + return nil +} + +func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + recordName := d.Get("name").(string) + + oldTtl, newTtl := d.GetChange("ttl") + oldType, newType := d.GetChange("type") + + oldCountRaw, _ := d.GetChange("rrdatas.#") + oldCount := oldCountRaw.(int) + + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: oldType.(string), + Ttl: int64(oldTtl.(int)), + Rrdatas: make([]string, oldCount), + }, + }, + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: newType.(string), + Ttl: int64(newTtl.(int)), + Rrdatas: rrdata(d), + }, + }, + } + + for i := 0; i < oldCount; i++ { + rrKey := fmt.Sprintf("rrdatas.%d", i) + oldRR, _ := d.GetChange(rrKey) + chg.Deletions[0].Rrdatas[i] = oldRR.(string) + } + log.Printf("[DEBUG] DNS Record change request: %#v old: %#v new: %#v", chg, chg.Deletions[0], chg.Additions[0]) + chg, err = config.clientDns.Changes.Create(project, 
zone, chg).Do() + if err != nil { + return fmt.Errorf("Error changing DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + if _, err = w.Conf().WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func resourceDnsRecordSetImportState(d *schema.ResourceData, _ interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid dns record specifier. Expecting {zone-name}/{record-name}/{record-type}. The record name must include a trailing '.' at the end.") + } + + d.Set("managed_zone", parts[0]) + d.Set("name", parts[1]) + d.Set("type", parts[2]) + + return []*schema.ResourceData{d}, nil +} + +func rrdata( + d *schema.ResourceData, +) []string { + rrdatasCount := d.Get("rrdatas.#").(int) + data := make([]string, rrdatasCount) + for i := 0; i < rrdatasCount; i++ { + data[i] = d.Get(fmt.Sprintf("rrdatas.%d", i)).(string) + } + return data +} + +func ipv6AddressDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + oldIp := net.ParseIP(old) + newIp := net.ParseIP(new) + + return oldIp.Equal(newIp) +} diff --git a/provider/terraform/resources/resource_endpoints_service.go b/provider/terraform/resources/resource_endpoints_service.go new file mode 100644 index 000000000000..ccbee59e918a --- /dev/null +++ b/provider/terraform/resources/resource_endpoints_service.go @@ -0,0 +1,321 @@ +package google + +import ( + "encoding/base64" + "encoding/json" + "errors" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/servicemanagement/v1" +) + +func resourceEndpointsService() *schema.Resource { + return &schema.Resource{ + Create: resourceEndpointsServiceCreate, + Read: resourceEndpointsServiceRead, + Delete: resourceEndpointsServiceDelete, + Update: resourceEndpointsServiceUpdate, + + 
// Migrates protoc_output -> protoc_output_base64. + SchemaVersion: 1, + MigrateState: migrateEndpointsService, + + Schema: map[string]*schema.Schema{ + "service_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "openapi_config": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"grpc_config", "protoc_output_base64"}, + }, + "grpc_config": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "protoc_output": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Deprecated: "Please use protoc_output_base64 instead.", + }, + "protoc_output_base64": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "config_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "apis": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "syntax": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "version": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "methods": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "syntax": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "request_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "response_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "dns_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "endpoints": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func getOpenAPIConfigSource(configText string) servicemanagement.ConfigSource { + // We need to provide a ConfigSource object to the API whenever submitting a + // new config. A ConfigSource contains a ConfigFile which contains the b64 + // encoded contents of the file. OpenAPI requires only one file. + configfile := servicemanagement.ConfigFile{ + FileContents: base64.StdEncoding.EncodeToString([]byte(configText)), + FileType: "OPEN_API_YAML", + FilePath: "heredoc.yaml", + } + return servicemanagement.ConfigSource{ + Files: []*servicemanagement.ConfigFile{&configfile}, + } +} + +func getGRPCConfigSource(serviceConfig, protoConfig string) servicemanagement.ConfigSource { + // gRPC requires both the file specifying the service and the compiled protobuf, + // but they can be in any order. + ymlConfigfile := servicemanagement.ConfigFile{ + FileContents: base64.StdEncoding.EncodeToString([]byte(serviceConfig)), + FileType: "SERVICE_CONFIG_YAML", + FilePath: "heredoc.yaml", + } + protoConfigfile := servicemanagement.ConfigFile{ + FileContents: protoConfig, + FileType: "FILE_DESCRIPTOR_SET_PROTO", + FilePath: "api_def.pb", + } + return servicemanagement.ConfigSource{ + Files: []*servicemanagement.ConfigFile{&ymlConfigfile, &protoConfigfile}, + } +} + +func resourceEndpointsServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // If the service doesn't exist, we'll need to create it, but if it does, it + // will be reused. This is unusual for Terraform, but it causes the behavior + // that users will want and accept. Users of Endpoints are not thinking in + // terms of services, configs, and rollouts - they just want the setup declared + // in their config to happen. 
The fact that a service may need to be created + // is not interesting to them. Consequently, we create this service if necessary + // so that we can perform the rollout without further disruption, which is the + // action that a user running `terraform apply` is going to want. + serviceName := d.Get("service_name").(string) + servicesService := servicemanagement.NewServicesService(config.clientServiceMan) + _, err = servicesService.Get(serviceName).Do() + if err != nil { + _, err = servicesService.Create(&servicemanagement.ManagedService{ProducerProjectId: project, ServiceName: serviceName}).Do() + if err != nil { + return err + } + } + // Do a rollout using the update mechanism. + err = resourceEndpointsServiceUpdate(d, meta) + if err != nil { + return err + } + + d.SetId(serviceName) + return resourceEndpointsServiceRead(d, meta) +} + +func resourceEndpointsServiceUpdate(d *schema.ResourceData, meta interface{}) error { + // This update is not quite standard for a terraform resource. Instead of + // using the go client library to send an HTTP request to update something + // serverside, we have to push a new configuration, wait for it to be + // parsed and loaded, then create and push a rollout and wait for that + // rollout to be completed. + // There's a lot of moving parts there, and all of them have knobs that can + // be tweaked if the user is using gcloud. In the interest of simplicity, + // we currently only support full rollouts - anyone trying to do incremental + // rollouts or A/B testing is going to need a more precise tool than this resource. 
+ config := meta.(*Config) + serviceName := d.Get("service_name").(string) + var source servicemanagement.ConfigSource + if openapiConfig, ok := d.GetOk("openapi_config"); ok { + source = getOpenAPIConfigSource(openapiConfig.(string)) + } else { + grpcConfig, gok := d.GetOk("grpc_config") + protocOutput, pok := d.GetOk("protoc_output_base64") + + // Support conversion from raw file -> base64 until the field is totally removed. + if !pok { + protocOutput, pok = d.GetOk("protoc_output") + if pok { + protocOutput = base64.StdEncoding.EncodeToString([]byte(protocOutput.(string))) + } + } + + if gok && pok { + source = getGRPCConfigSource(grpcConfig.(string), protocOutput.(string)) + } else { + return errors.New("Could not decypher config - please either set openapi_config or set both grpc_config and protoc_output_base64.") + } + } + + configService := servicemanagement.NewServicesConfigsService(config.clientServiceMan) + // The difference between "submit" and "create" is that submit parses the config + // you provide, where "create" requires the config in a pre-parsed format. + // "submit" will be a lot more flexible for users and will always be up-to-date + // with any new features that arise - this is why you provide a YAML config + // instead of providing the config in HCL. + op, err := configService.Submit(serviceName, &servicemanagement.SubmitConfigSourceRequest{ConfigSource: &source}).Do() + if err != nil { + return err + } + s, err := serviceManagementOperationWait(config, op, "Submitting service config.") + if err != nil { + return err + } + var serviceConfig servicemanagement.SubmitConfigSourceResponse + json.Unmarshal(s, &serviceConfig) + + // Next, we create a new rollout with the new config value, and wait for it to complete. 
+ rolloutService := servicemanagement.NewServicesRolloutsService(config.clientServiceMan) + rollout := servicemanagement.Rollout{ + ServiceName: serviceName, + TrafficPercentStrategy: &servicemanagement.TrafficPercentStrategy{ + Percentages: map[string]float64{serviceConfig.ServiceConfig.Id: 100.0}, + }, + } + op, err = rolloutService.Create(serviceName, &rollout).Do() + if err != nil { + return err + } + _, err = serviceManagementOperationWait(config, op, "Performing service rollout.") + if err != nil { + return err + } + return resourceEndpointsServiceRead(d, meta) +} + +func resourceEndpointsServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + servicesService := servicemanagement.NewServicesService(config.clientServiceMan) + op, err := servicesService.Delete(d.Get("service_name").(string)).Do() + if err != nil { + return err + } + _, err = serviceManagementOperationWait(config, op, "Deleting service.") + d.SetId("") + return err +} + +func resourceEndpointsServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + servicesService := servicemanagement.NewServicesService(config.clientServiceMan) + service, err := servicesService.GetConfig(d.Get("service_name").(string)).Do() + if err != nil { + return err + } + d.Set("config_id", service.Id) + d.Set("dns_address", service.Name) + d.Set("apis", flattenServiceManagementAPIs(service.Apis)) + d.Set("endpoints", flattenServiceManagementEndpoints(service.Endpoints)) + + return nil +} + +func flattenServiceManagementAPIs(apis []*servicemanagement.Api) []map[string]interface{} { + flattened := make([]map[string]interface{}, len(apis)) + for i, a := range apis { + flattened[i] = map[string]interface{}{ + "name": a.Name, + "version": a.Version, + "syntax": a.Syntax, + "methods": flattenServiceManagementMethods(a.Methods), + } + } + return flattened +} + +func flattenServiceManagementMethods(methods []*servicemanagement.Method) []map[string]interface{} { 
+ flattened := make([]map[string]interface{}, len(methods)) + for i, m := range methods { + flattened[i] = map[string]interface{}{ + "name": m.Name, + "syntax": m.Syntax, + "request_type": m.RequestTypeUrl, + "response_type": m.ResponseTypeUrl, + } + } + return flattened +} + +func flattenServiceManagementEndpoints(endpoints []*servicemanagement.Endpoint) []map[string]interface{} { + flattened := make([]map[string]interface{}, len(endpoints)) + for i, e := range endpoints { + flattened[i] = map[string]interface{}{ + "name": e.Name, + "address": e.Target, + } + } + return flattened +} diff --git a/provider/terraform/resources/resource_endpoints_service_migration.go b/provider/terraform/resources/resource_endpoints_service_migration.go new file mode 100644 index 000000000000..341f67a9c58e --- /dev/null +++ b/provider/terraform/resources/resource_endpoints_service_migration.go @@ -0,0 +1,23 @@ +package google + +import ( + "encoding/base64" + "fmt" + "github.com/hashicorp/terraform/terraform" + "log" +) + +func migrateEndpointsService(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + if is.Attributes["protoc_output"] == "" { + log.Println("[DEBUG] Nothing to migrate to V1.") + return is, nil + } + is.Attributes["protoc_output_base64"] = base64.StdEncoding.EncodeToString([]byte(is.Attributes["protoc_output"])) + is.Attributes["protoc_output"] = "" + return is, nil + default: + return nil, fmt.Errorf("Unexpected schema version: %d", v) + } +} diff --git a/provider/terraform/resources/resource_google_folder.go b/provider/terraform/resources/resource_google_folder.go new file mode 100644 index 000000000000..0e2fa893b2bb --- /dev/null +++ b/provider/terraform/resources/resource_google_folder.go @@ -0,0 +1,170 @@ +package google + +import ( + "encoding/json" + "fmt" + "github.com/hashicorp/terraform/helper/schema" + resourceManagerV2Beta1 "google.golang.org/api/cloudresourcemanager/v2beta1" + "strings" +) + 
+func resourceGoogleFolder() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleFolderCreate, + Read: resourceGoogleFolderRead, + Update: resourceGoogleFolderUpdate, + Delete: resourceGoogleFolderDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGoogleFolderImportState, + }, + + Schema: map[string]*schema.Schema{ + // Format is either folders/{folder_id} or organizations/{org_id}. + "parent": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + // Must be unique amongst its siblings. + "display_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + // Format is 'folders/{folder_id}. + // The terraform id holds the same value. + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "lifecycle_state": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "create_time": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceGoogleFolderCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + displayName := d.Get("display_name").(string) + parent := d.Get("parent").(string) + + op, err := config.clientResourceManagerV2Beta1.Folders.Create(&resourceManagerV2Beta1.Folder{ + DisplayName: displayName, + }).Parent(parent).Do() + + if err != nil { + return fmt.Errorf("Error creating folder '%s' in '%s': %s", displayName, parent, err) + } + + err = resourceManagerV2Beta1OperationWait(config.clientResourceManager, op, "creating folder") + + if err != nil { + return fmt.Errorf("Error creating folder '%s' in '%s': %s", displayName, parent, err) + } + + // Since we waited above, the operation is guaranteed to have been successful by this point. + waitOp, err := config.clientResourceManager.Operations.Get(op.Name).Do() + if err != nil { + return fmt.Errorf("The folder '%s' has been created but we could not retrieve its id. 
Delete the folder manually and retry or use 'terraform import': %s", displayName, err) + } + + // Requires 3 successive checks for safety. Nested IFs are used to avoid 3 error statement with the same message. + var responseMap map[string]interface{} + if err := json.Unmarshal(waitOp.Response, &responseMap); err == nil { + if val, ok := responseMap["name"]; ok { + if name, ok := val.(string); ok { + d.SetId(name) + return resourceGoogleFolderRead(d, meta) + } + } + } + return fmt.Errorf("The folder '%s' has been created but we could not retrieve its id. Delete the folder manually and retry or use 'terraform import'", displayName) +} + +func resourceGoogleFolderRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + folder, err := config.clientResourceManagerV2Beta1.Folders.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, d.Id()) + } + + d.Set("name", folder.Name) + d.Set("parent", folder.Parent) + d.Set("display_name", folder.DisplayName) + d.Set("lifecycle_state", folder.LifecycleState) + d.Set("create_time", folder.CreateTime) + + return nil +} + +func resourceGoogleFolderUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + displayName := d.Get("display_name").(string) + + d.Partial(true) + if d.HasChange("display_name") { + _, err := config.clientResourceManagerV2Beta1.Folders.Patch(d.Id(), &resourceManagerV2Beta1.Folder{ + DisplayName: displayName, + }).Do() + + if err != nil { + return fmt.Errorf("Error updating display_name to '%s': %s", displayName, err) + } + + d.SetPartial("display_name") + } + + if d.HasChange("parent") { + newParent := d.Get("parent").(string) + op, err := config.clientResourceManagerV2Beta1.Folders.Move(d.Id(), &resourceManagerV2Beta1.MoveFolderRequest{ + DestinationParent: newParent, + }).Do() + + if err != nil { + return fmt.Errorf("Error moving folder '%s' to '%s': %s", displayName, newParent, err) + } + + err = 
resourceManagerV2Beta1OperationWait(config.clientResourceManager, op, "move folder") + if err != nil { + return fmt.Errorf("Error moving folder '%s' to '%s': %s", displayName, newParent, err) + } + + d.SetPartial("parent") + } + + d.Partial(false) + + return nil +} + +func resourceGoogleFolderDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + displayName := d.Get("display_name").(string) + + _, err := config.clientResourceManagerV2Beta1.Folders.Delete(d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting folder %s", displayName) + } + + return nil +} + +func resourceGoogleFolderImportState(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + id := d.Id() + + if !strings.HasPrefix(d.Id(), "folders/") { + id = fmt.Sprintf("folders/%s", id) + } + + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_google_folder_organization_policy.go b/provider/terraform/resources/resource_google_folder_organization_policy.go new file mode 100644 index 000000000000..6a12247f7c19 --- /dev/null +++ b/provider/terraform/resources/resource_google_folder_organization_policy.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func resourceGoogleFolderOrganizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleFolderOrganizationPolicyCreate, + Read: resourceGoogleFolderOrganizationPolicyRead, + Update: resourceGoogleFolderOrganizationPolicyUpdate, + Delete: resourceGoogleFolderOrganizationPolicyDelete, + + Schema: mergeSchemas( + schemaOrganizationPolicy, + map[string]*schema.Schema{ + "folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + ), + } +} + +func resourceGoogleFolderOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + if err := setFolderOrganizationPolicy(d, 
meta); err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s:%s", d.Get("folder"), d.Get("constraint"))) + + return resourceGoogleFolderOrganizationPolicyRead(d, meta) +} + +func resourceGoogleFolderOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + folder := canonicalFolderId(d.Get("folder").(string)) + + policy, err := config.clientResourceManager.Folders.GetOrgPolicy(folder, &cloudresourcemanager.GetOrgPolicyRequest{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", folder)) + } + + d.Set("constraint", policy.Constraint) + d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)) + d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)) + d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)) + d.Set("version", policy.Version) + d.Set("etag", policy.Etag) + d.Set("update_time", policy.UpdateTime) + + return nil +} + +func resourceGoogleFolderOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + if err := setFolderOrganizationPolicy(d, meta); err != nil { + return err + } + + return resourceGoogleFolderOrganizationPolicyRead(d, meta) +} + +func resourceGoogleFolderOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + folder := canonicalFolderId(d.Get("folder").(string)) + + _, err := config.clientResourceManager.Folders.ClearOrgPolicy(folder, &cloudresourcemanager.ClearOrgPolicyRequest{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + + if err != nil { + return err + } + + return nil +} + +func setFolderOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + folder := canonicalFolderId(d.Get("folder").(string)) + + listPolicy, err := 
expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) + if err != nil { + return err + } + + restoreDefault, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) + if err != nil { + return err + } + + _, err = config.clientResourceManager.Folders.SetOrgPolicy(folder, &cloudresourcemanager.SetOrgPolicyRequest{ + Policy: &cloudresourcemanager.OrgPolicy{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), + ListPolicy: listPolicy, + RestoreDefault: restoreDefault, + Version: int64(d.Get("version").(int)), + Etag: d.Get("etag").(string), + }, + }).Do() + + return err +} diff --git a/provider/terraform/resources/resource_google_organization_iam_custom_role.go b/provider/terraform/resources/resource_google_organization_iam_custom_role.go new file mode 100644 index 000000000000..976419e3a96a --- /dev/null +++ b/provider/terraform/resources/resource_google_organization_iam_custom_role.go @@ -0,0 +1,233 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/iam/v1" +) + +func resourceGoogleOrganizationIamCustomRole() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleOrganizationIamCustomRoleCreate, + Read: resourceGoogleOrganizationIamCustomRoleRead, + Update: resourceGoogleOrganizationIamCustomRoleUpdate, + Delete: resourceGoogleOrganizationIamCustomRoleDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "role_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "title": { + Type: schema.TypeString, + Required: true, + }, + "permissions": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Elem: 
&schema.Schema{Type: schema.TypeString}, + }, + "stage": { + Type: schema.TypeString, + Optional: true, + Default: "GA", + ValidateFunc: validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", "EAP"}, false), + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "deleted": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Deprecated: `deleted will be converted to a computed-only field soon - if you want to delete this role, please use destroy`, + }, + }, + } +} + +func resourceGoogleOrganizationIamCustomRoleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + if d.Get("deleted").(bool) { + return fmt.Errorf("cannot create a custom organization role with a deleted state. `deleted` field should be false.") + } + + org := d.Get("org_id").(string) + roleId := fmt.Sprintf("organizations/%s/roles/%s", org, d.Get("role_id").(string)) + orgId := fmt.Sprintf("organizations/%s", org) + + // Look for role with given ID. + // If it exists in deleted state, update to match "created" role state + // If it exists and and is enabled, return error - we should not try to recreate. + r, err := config.clientIAM.Organizations.Roles.Get(roleId).Do() + if err == nil { + if r.Deleted { + // This role was soft-deleted; update to match new state. + d.SetId(r.Name) + if err := resourceGoogleOrganizationIamCustomRoleUpdate(d, meta); err != nil { + // If update failed, make sure it wasn't actually added to state. + d.SetId("") + return err + } + } else { + // If a role with same name exists and is enabled, just return error + return fmt.Errorf("Custom project role %s already exists and must be imported", roleId) + } + } else if err := handleNotFoundError(err, d, fmt.Sprintf("Custom Organization Role %q", roleId)); err == nil { + // If no role was found, actually create a new role. 
+ role, err := config.clientIAM.Organizations.Roles.Create(orgId, &iam.CreateRoleRequest{ + RoleId: d.Get("role_id").(string), + Role: &iam.Role{ + Title: d.Get("title").(string), + Description: d.Get("description").(string), + Stage: d.Get("stage").(string), + IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + }, + }).Do() + if err != nil { + return fmt.Errorf("Error creating the custom organization role %s: %s", d.Get("title").(string), err) + } + + d.SetId(role.Name) + } else { + return fmt.Errorf("Unable to verify whether custom org role %s already exists and must be undeleted: %v", roleId, err) + } + + return resourceGoogleOrganizationIamCustomRoleRead(d, meta) +} + +func resourceGoogleOrganizationIamCustomRoleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + role, err := config.clientIAM.Organizations.Roles.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, d.Id()) + } + + parsedRoleName, err := ParseOrganizationCustomRoleName(role.Name) + if err != nil { + return err + } + + d.Set("role_id", parsedRoleName.Name) + d.Set("org_id", parsedRoleName.OrgId) + d.Set("title", role.Title) + d.Set("description", role.Description) + d.Set("permissions", role.IncludedPermissions) + d.Set("stage", role.Stage) + d.Set("deleted", role.Deleted) + + return nil +} + +func resourceGoogleOrganizationIamCustomRoleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.Get("deleted").(bool) { + if d.HasChange("deleted") { + // If other fields were changed, we need to update those first and then delete. + // If we don't update, we will get diffs from re-apply + // If we delete and then try to update, we will get an error. 
+ if err := resourceGoogleOrganizationIamCustomRoleUpdateNonDeletedFields(d, meta); err != nil { + return err + } + + if err := resourceGoogleOrganizationIamCustomRoleDelete(d, meta); err != nil { + return err + } + + d.SetPartial("deleted") + d.Partial(false) + return nil + } else { + return fmt.Errorf("cannot make changes to deleted custom organization role %s", d.Id()) + } + } + + // We want to update the role to some undeleted state. + // Make sure the role with given ID exists and is un-deleted before patching. + r, err := config.clientIAM.Organizations.Roles.Get(d.Id()).Do() + if err != nil { + return fmt.Errorf("unable to find custom project role %s to update: %v", d.Id(), err) + } + if r.Deleted { + if err := resourceGoogleOrganizationIamCustomRoleUndelete(d, meta); err != nil { + return err + } + d.SetPartial("deleted") + } + + if err := resourceGoogleOrganizationIamCustomRoleUpdateNonDeletedFields(d, meta); err != nil { + return err + } + d.Partial(false) + + return nil +} + +func resourceGoogleOrganizationIamCustomRoleUpdateNonDeletedFields(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + if d.HasChange("title") || d.HasChange("description") || d.HasChange("stage") || d.HasChange("permissions") { + _, err := config.clientIAM.Organizations.Roles.Patch(d.Id(), &iam.Role{ + Title: d.Get("title").(string), + Description: d.Get("description").(string), + Stage: d.Get("stage").(string), + IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + }).Do() + + if err != nil { + return fmt.Errorf("Error updating the custom organization role %s: %s", d.Get("title").(string), err) + } + d.SetPartial("title") + d.SetPartial("description") + d.SetPartial("stage") + d.SetPartial("permissions") + } + + return nil +} + +func resourceGoogleOrganizationIamCustomRoleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + r, err := config.clientIAM.Organizations.Roles.Get(d.Id()).Do() + if err 
== nil && r != nil && r.Deleted && d.Get("deleted").(bool) { + // This role has already been deleted, don't try again. + return nil + } + + _, err = config.clientIAM.Organizations.Roles.Delete(d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting the custom organization role %s: %s", d.Get("title").(string), err) + } + + return nil +} + +func resourceGoogleOrganizationIamCustomRoleUndelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientIAM.Organizations.Roles.Undelete(d.Id(), &iam.UndeleteRoleRequest{}).Do() + if err != nil { + return fmt.Errorf("Error undeleting the custom organization role %s: %s", d.Get("title").(string), err) + } + + return nil +} diff --git a/provider/terraform/resources/resource_google_organization_policy.go b/provider/terraform/resources/resource_google_organization_policy.go new file mode 100644 index 000000000000..6e85947e44ce --- /dev/null +++ b/provider/terraform/resources/resource_google_organization_policy.go @@ -0,0 +1,374 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + "strings" +) + +var schemaOrganizationPolicy = map[string]*schema.Schema{ + "constraint": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, + }, + "boolean_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"list_policy", "restore_policy"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enforced": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "list_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"boolean_policy", "restore_policy"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allow": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"list_policy.0.deny"}, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "all": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"list_policy.0.allow.0.values"}, + }, + "values": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + "deny": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ConflictsWith: []string{"list_policy.0.deny.0.values"}, + }, + "values": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + "suggested_value": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + "version": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + }, + "restore_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"boolean_policy", "list_policy"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, +} + +func resourceGoogleOrganizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleOrganizationPolicyCreate, + Read: resourceGoogleOrganizationPolicyRead, + Update: resourceGoogleOrganizationPolicyUpdate, + Delete: resourceGoogleOrganizationPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceGoogleOrganizationPolicyImportState, + }, + + Schema: mergeSchemas( + schemaOrganizationPolicy, + map[string]*schema.Schema{ + "org_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }), + } +} + +func resourceGoogleOrganizationPolicyCreate(d *schema.ResourceData, meta 
interface{}) error { + if err := setOrganizationPolicy(d, meta); err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s:%s", d.Get("org_id"), d.Get("constraint").(string))) + + return resourceGoogleOrganizationPolicyRead(d, meta) +} + +func resourceGoogleOrganizationPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + org := "organizations/" + d.Get("org_id").(string) + + policy, err := config.clientResourceManager.Organizations.GetOrgPolicy(org, &cloudresourcemanager.GetOrgPolicyRequest{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", org)) + } + + d.Set("constraint", policy.Constraint) + d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)) + d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)) + d.Set("version", policy.Version) + d.Set("etag", policy.Etag) + d.Set("update_time", policy.UpdateTime) + d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)) + + return nil +} + +func resourceGoogleOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + if err := setOrganizationPolicy(d, meta); err != nil { + return err + } + + return resourceGoogleOrganizationPolicyRead(d, meta) +} + +func resourceGoogleOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + org := "organizations/" + d.Get("org_id").(string) + + _, err := config.clientResourceManager.Organizations.ClearOrgPolicy(org, &cloudresourcemanager.ClearOrgPolicyRequest{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + + if err != nil { + return err + } + + return nil +} + +func resourceGoogleOrganizationPolicyImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), ":") + if len(parts) != 2 { + return 
nil, fmt.Errorf("Invalid id format. Expecting {org_id}:{constraint}, got '%s' instead.", d.Id()) + } + + d.Set("org_id", parts[0]) + d.Set("constraint", parts[1]) + + return []*schema.ResourceData{d}, nil +} + +func setOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + org := "organizations/" + d.Get("org_id").(string) + + listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) + if err != nil { + return err + } + + restoreDefault, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) + if err != nil { + return err + } + + _, err = config.clientResourceManager.Organizations.SetOrgPolicy(org, &cloudresourcemanager.SetOrgPolicyRequest{ + Policy: &cloudresourcemanager.OrgPolicy{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), + ListPolicy: listPolicy, + RestoreDefault: restoreDefault, + Version: int64(d.Get("version").(int)), + Etag: d.Get("etag").(string), + }, + }).Do() + + return err +} + +func flattenBooleanOrganizationPolicy(policy *cloudresourcemanager.BooleanPolicy) []map[string]interface{} { + bPolicies := make([]map[string]interface{}, 0, 1) + + if policy == nil { + return bPolicies + } + + bPolicies = append(bPolicies, map[string]interface{}{ + "enforced": policy.Enforced, + }) + + return bPolicies +} + +func flattenRestoreOrganizationPolicy(restore_policy *cloudresourcemanager.RestoreDefault) []map[string]interface{} { + rp := make([]map[string]interface{}, 0, 1) + + if restore_policy == nil { + return rp + } + + rp = append(rp, map[string]interface{}{ + "default": true, + }) + + return rp +} + +func expandBooleanOrganizationPolicy(configured []interface{}) *cloudresourcemanager.BooleanPolicy { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + booleanPolicy := configured[0].(map[string]interface{}) + return 
&cloudresourcemanager.BooleanPolicy{ + Enforced: booleanPolicy["enforced"].(bool), + } +} + +func expandRestoreOrganizationPolicy(configured []interface{}) (*cloudresourcemanager.RestoreDefault, error) { + if len(configured) == 0 || configured[0] == nil { + return nil, nil + } + + restoreDefaultMap := configured[0].(map[string]interface{}) + default_value := restoreDefaultMap["default"].(bool) + + if default_value { + return &cloudresourcemanager.RestoreDefault{}, nil + } + + return nil, fmt.Errorf("Invalid value for restore_policy. Expecting default = true") +} + +func flattenListOrganizationPolicy(policy *cloudresourcemanager.ListPolicy) []map[string]interface{} { + lPolicies := make([]map[string]interface{}, 0, 1) + + if policy == nil { + return lPolicies + } + + listPolicy := map[string]interface{}{} + switch { + case policy.AllValues == "ALLOW": + listPolicy["allow"] = []interface{}{map[string]interface{}{ + "all": true, + }} + case policy.AllValues == "DENY": + listPolicy["deny"] = []interface{}{map[string]interface{}{ + "all": true, + }} + case len(policy.AllowedValues) > 0: + listPolicy["allow"] = []interface{}{map[string]interface{}{ + "values": schema.NewSet(schema.HashString, convertStringArrToInterface(policy.AllowedValues)), + }} + case len(policy.DeniedValues) > 0: + listPolicy["deny"] = []interface{}{map[string]interface{}{ + "values": schema.NewSet(schema.HashString, convertStringArrToInterface(policy.DeniedValues)), + }} + } + + lPolicies = append(lPolicies, listPolicy) + + return lPolicies +} + +func expandListOrganizationPolicy(configured []interface{}) (*cloudresourcemanager.ListPolicy, error) { + if len(configured) == 0 || configured[0] == nil { + return nil, nil + } + + listPolicyMap := configured[0].(map[string]interface{}) + + allow := listPolicyMap["allow"].([]interface{}) + deny := listPolicyMap["deny"].([]interface{}) + + var allValues string + var allowedValues []string + var deniedValues []string + if len(allow) > 0 { + allowMap := 
allow[0].(map[string]interface{}) + all := allowMap["all"].(bool) + values := allowMap["values"].(*schema.Set) + + if all { + allValues = "ALLOW" + } else { + allowedValues = convertStringArr(values.List()) + } + } + + if len(deny) > 0 { + denyMap := deny[0].(map[string]interface{}) + all := denyMap["all"].(bool) + values := denyMap["values"].(*schema.Set) + + if all { + allValues = "DENY" + } else { + deniedValues = convertStringArr(values.List()) + } + } + + listPolicy := configured[0].(map[string]interface{}) + return &cloudresourcemanager.ListPolicy{ + AllValues: allValues, + AllowedValues: allowedValues, + DeniedValues: deniedValues, + SuggestedValue: listPolicy["suggested_value"].(string), + }, nil +} + +func canonicalOrgPolicyConstraint(constraint string) string { + if strings.HasPrefix(constraint, "constraints/") { + return constraint + } + return "constraints/" + constraint +} diff --git a/provider/terraform/resources/resource_google_project.go b/provider/terraform/resources/resource_google_project.go new file mode 100644 index 000000000000..553c06b73012 --- /dev/null +++ b/provider/terraform/resources/resource_google_project.go @@ -0,0 +1,702 @@ +package google + +import ( + "fmt" + "log" + "net/http" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + appengine "google.golang.org/api/appengine/v1" + "google.golang.org/api/cloudbilling/v1" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/googleapi" +) + +// resourceGoogleProject returns a *schema.Resource that allows a customer +// to declare a Google Cloud Project resource. 
+func resourceGoogleProject() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + Create: resourceGoogleProjectCreate, + Read: resourceGoogleProjectRead, + Update: resourceGoogleProjectUpdate, + Delete: resourceGoogleProjectDelete, + + Importer: &schema.ResourceImporter{ + State: resourceProjectImportState, + }, + MigrateState: resourceGoogleProjectMigrateState, + CustomizeDiff: resourceGoogleProjectCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "project_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateProjectID(), + }, + "skip_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "auto_create_network": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validateProjectName(), + }, + "org_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "folder_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + StateFunc: parseFolderId, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", + }, + "policy_etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Removed: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", + }, + "number": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "billing_account": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "app_engine": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: appEngineResource(), + MaxItems: 1, + 
Deprecated: "Use the google_app_engine_application resource instead.", + }, + }, + } +} + +func appEngineResource() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auth_domain": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "location_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + "northamerica-northeast1", + "us-central", + "us-west2", + "us-east1", + "us-east4", + "southamerica-east1", + "europe-west", + "europe-west2", + "europe-west3", + "asia-northeast1", + "asia-south1", + "australia-southeast1", + }, false), + }, + "serving_status": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "UNSPECIFIED", + "SERVING", + "USER_DISABLED", + "SYSTEM_DISABLED", + }, false), + Computed: true, + }, + "feature_settings": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: appEngineFeatureSettingsResource(), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "url_dispatch_rule": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: appEngineURLDispatchRuleResource(), + }, + "code_bucket": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "default_hostname": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "default_bucket": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "gcr_domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func appEngineURLDispatchRuleResource() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "service": &schema.Schema{ + Type: schema.TypeString, + Computed: 
true, + }, + }, + } +} + +func appEngineFeatureSettingsResource() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "split_health_checks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + } +} + +func resourceGoogleProjectCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { + if old, _ := diff.GetChange("app_engine.0.location_id"); diff.HasChange("app_engine.0.location_id") && old != nil && old.(string) != "" { + return fmt.Errorf("Cannot change app_engine.0.location_id once the app is created.") + } + return nil +} + +func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + var pid string + var err error + pid = d.Get("project_id").(string) + + log.Printf("[DEBUG]: Creating new project %q", pid) + project := &cloudresourcemanager.Project{ + ProjectId: pid, + Name: d.Get("name").(string), + } + + if err := getParentResourceId(d, project); err != nil { + return err + } + + if _, ok := d.GetOk("labels"); ok { + project.Labels = expandLabels(d) + } + + op, err := config.clientResourceManager.Projects.Create(project).Do() + if err != nil { + return fmt.Errorf("error creating project %s (%s): %s. 
"+ + "If you received a 403 error, make sure you have the"+ + " `roles/resourcemanager.projectCreator` permission", + project.ProjectId, project.Name, err) + } + + d.SetId(pid) + + // Wait for the operation to complete + waitErr := resourceManagerOperationWait(config.clientResourceManager, op, "project to create") + if waitErr != nil { + // The resource wasn't actually created + d.SetId("") + return waitErr + } + + // Set the billing account + if _, ok := d.GetOk("billing_account"); ok { + err = updateProjectBillingAccount(d, config) + if err != nil { + return err + } + } + + // set up App Engine, too + app, err := expandAppEngineApp(d) + if err != nil { + return err + } + if app != nil { + log.Printf("[DEBUG] Enabling App Engine") + // enable the app engine APIs so we can create stuff + if err = enableService("appengine.googleapis.com", project.ProjectId, config); err != nil { + return fmt.Errorf("Error enabling the App Engine Admin API required to configure App Engine applications: %s", err) + } + log.Printf("[DEBUG] Enabled App Engine") + err = createAppEngineApp(config, pid, app) + if err != nil { + return err + } + } + + err = resourceGoogleProjectRead(d, meta) + if err != nil { + return err + } + + // There's no such thing as "don't auto-create network", only "delete the network + // post-creation" - but that's what it's called in the UI and let's not confuse + // people if we don't have to. The GCP Console is doing the same thing - creating + // a network and deleting it in the background. + if !d.Get("auto_create_network").(bool) { + // The compute API has to be enabled before we can delete a network. 
+ if err = enableService("compute.googleapis.com", project.ProjectId, config); err != nil { + return fmt.Errorf("Error enabling the Compute Engine API required to delete the default network: %s", err) + } + + if err = forceDeleteComputeNetwork(project.ProjectId, "default", config); err != nil { + return fmt.Errorf("Error deleting default network in project %s: %s", project.ProjectId, err) + } + } + return nil +} + +func createAppEngineApp(config *Config, pid string, app *appengine.Application) error { + app.Id = pid + log.Printf("[DEBUG] Creating App Engine App") + op, err := config.clientAppEngine.Apps.Create(app).Do() + if err != nil { + return fmt.Errorf("Error creating App Engine application: %s", err.Error()) + } + + // Wait for the operation to complete + waitErr := appEngineOperationWait(config.clientAppEngine, op, pid, "App Engine app to create") + if waitErr != nil { + return waitErr + } + log.Printf("[DEBUG] Created App Engine App") + return nil +} + +func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Id() + + // Read the project + p, err := config.clientResourceManager.Projects.Get(pid).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Project %q", pid)) + } + + // If the project has been deleted from outside Terraform, remove it from state file. 
+ if p.LifecycleState != "ACTIVE" { + log.Printf("[WARN] Removing project '%s' because its state is '%s' (requires 'ACTIVE').", pid, p.LifecycleState) + d.SetId("") + return nil + } + + d.Set("project_id", pid) + d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10)) + d.Set("name", p.Name) + d.Set("labels", p.Labels) + + if p.Parent != nil { + switch p.Parent.Type { + case "organization": + d.Set("org_id", p.Parent.Id) + d.Set("folder_id", "") + case "folder": + d.Set("folder_id", p.Parent.Id) + d.Set("org_id", "") + } + } + + // Read the billing account + ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() + if err != nil && !isApiNotEnabledError(err) { + return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) + } else if isApiNotEnabledError(err) { + log.Printf("[WARN] Billing info API not enabled, please enable it to read billing info about project %q: %s", pid, err.Error()) + } else if ba.BillingAccountName != "" { + // BillingAccountName is contains the resource name of the billing account + // associated with the project, if any. For example, + // `billingAccounts/012345-567890-ABCDEF`. We care about the ID and not + // the `billingAccounts/` prefix, so we need to remove that. If the + // prefix ever changes, we'll validate to make sure it's something we + // recognize. + _ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") + if ba.BillingAccountName == _ba { + return fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", prefixedProject(pid), ba.BillingAccountName) + } + d.Set("billing_account", _ba) + } + + // read the App Engine app, if one exists + // we don't have the config available for import, so we can't rely on + // that to read it. And honestly, we want to know if an App exists that + // shouldn't. 
So this tries to read it, sets it to empty if none exists, + // or sets it in state if one does exist. + app, err := config.clientAppEngine.Apps.Get(pid).Do() + if err != nil && !isGoogleApiErrorWithCode(err, 404) && !isApiNotEnabledError(err) { + return fmt.Errorf("Error retrieving App Engine application %q: %s", pid, err.Error()) + } else if isGoogleApiErrorWithCode(err, 404) { + d.Set("app_engine", []map[string]interface{}{}) + } else if isApiNotEnabledError(err) { + log.Printf("[WARN] App Engine Admin API not enabled, please enable it to read App Engine info about project %q: %s", pid, err.Error()) + d.Set("app_engine", []map[string]interface{}{}) + } else { + appBlocks, err := flattenAppEngineApp(app) + if err != nil { + return fmt.Errorf("Error serializing App Engine app: %s", err.Error()) + } + err = d.Set("app_engine", appBlocks) + if err != nil { + return fmt.Errorf("Error setting App Engine application in state. This is a bug, please report it at https://github.com/terraform-providers/terraform-provider-google/issues. 
Error is:\n%s", err.Error()) + } + } + return nil +} + +func prefixedProject(pid string) string { + return "projects/" + pid +} + +func getParentResourceId(d *schema.ResourceData, p *cloudresourcemanager.Project) error { + orgId := d.Get("org_id").(string) + folderId := d.Get("folder_id").(string) + + if orgId != "" && folderId != "" { + return fmt.Errorf("'org_id' and 'folder_id' cannot be both set.") + } + + if orgId != "" { + p.Parent = &cloudresourcemanager.ResourceId{ + Id: orgId, + Type: "organization", + } + } + + if folderId != "" { + p.Parent = &cloudresourcemanager.ResourceId{ + Id: parseFolderId(folderId), + Type: "folder", + } + } + + return nil +} + +func parseFolderId(v interface{}) string { + folderId := v.(string) + if strings.HasPrefix(folderId, "folders/") { + return folderId[8:] + } + return folderId +} + +func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Id() + project_name := d.Get("name").(string) + + // Read the project + // we need the project even though refresh has already been called + // because the API doesn't support patch, so we need the actual object + p, err := config.clientResourceManager.Projects.Get(pid).Do() + if err != nil { + if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { + return fmt.Errorf("Project %q does not exist.", pid) + } + return fmt.Errorf("Error checking project %q: %s", pid, err) + } + + d.Partial(true) + + // Project display name has changed + if ok := d.HasChange("name"); ok { + p.Name = project_name + // Do update on project + p, err = config.clientResourceManager.Projects.Update(p.ProjectId, p).Do() + if err != nil { + return fmt.Errorf("Error updating project %q: %s", project_name, err) + } + d.SetPartial("name") + } + + // Project parent has changed + if d.HasChange("org_id") || d.HasChange("folder_id") { + if err := getParentResourceId(d, p); err != nil { + return err + } + + // Do update on project + p, err = 
config.clientResourceManager.Projects.Update(p.ProjectId, p).Do() + if err != nil { + return fmt.Errorf("Error updating project %q: %s", project_name, err) + } + d.SetPartial("org_id") + d.SetPartial("folder_id") + } + + // Billing account has changed + if ok := d.HasChange("billing_account"); ok { + err = updateProjectBillingAccount(d, config) + if err != nil { + return err + } + } + + // Project Labels have changed + if ok := d.HasChange("labels"); ok { + p.Labels = expandLabels(d) + + // Do Update on project + p, err = config.clientResourceManager.Projects.Update(p.ProjectId, p).Do() + if err != nil { + return fmt.Errorf("Error updating project %q: %s", project_name, err) + } + d.SetPartial("labels") + } + + // App Engine App has changed + if ok := d.HasChange("app_engine"); ok { + app, err := expandAppEngineApp(d) + if err != nil { + return err + } + // ignore if app is now not set; that should force new resource using customizediff + if app != nil { + if old, new := d.GetChange("app_engine.#"); (old == nil || old.(int) < 1) && new != nil && new.(int) > 0 { + err = createAppEngineApp(config, pid, app) + if err != nil { + return err + } + } else { + log.Printf("[DEBUG] Updating App Engine App") + op, err := config.clientAppEngine.Apps.Patch(pid, app).UpdateMask("authDomain,servingStatus,featureSettings.splitHealthChecks").Do() + if err != nil { + return fmt.Errorf("Error creating App Engine application: %s", err.Error()) + } + + // Wait for the operation to complete + waitErr := appEngineOperationWait(config.clientAppEngine, op, pid, "App Engine app to update") + if waitErr != nil { + return waitErr + } + log.Printf("[DEBUG] Updated App Engine App") + } + d.SetPartial("app_engine") + } + } + + d.Partial(false) + + return resourceGoogleProjectRead(d, meta) +} + +func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + // Only delete projects if skip_delete isn't set + if !d.Get("skip_delete").(bool) { + pid 
:= d.Id()
		_, err := config.clientResourceManager.Projects.Delete(pid).Do()
		if err != nil {
			return fmt.Errorf("Error deleting project %q: %s", pid, err)
		}
	}
	d.SetId("")
	return nil
}

// resourceProjectImportState adapts an imported project ID into state.
func resourceProjectImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	// Explicitly set to default as a workaround for `ImportStateVerify` tests, and so that users
	// don't see a diff immediately after import.
	d.Set("auto_create_network", true)
	return []*schema.ResourceData{d}, nil
}

// Delete a compute network along with the firewall rules inside it.
func forceDeleteComputeNetwork(projectId, networkName string, config *Config) error {
	networkLink := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s", projectId, networkName)

	token := ""
	for paginate := true; paginate; {
		filter := fmt.Sprintf("network eq %s", networkLink)
		// NOTE(review): `token` is captured below but never sent back via
		// .PageToken(token), so each pass re-lists from the first page.
		// The loop still appears to terminate because every listed rule is
		// deleted before re-listing, but confirm against the Firewalls.list
		// pagination contract before relying on this for >1 page of rules.
		resp, err := config.clientCompute.Firewalls.List(projectId).Filter(filter).Do()
		if err != nil {
			return fmt.Errorf("Error listing firewall rules in proj: %s", err)
		}

		log.Printf("[DEBUG] Found %d firewall rules in %q network", len(resp.Items), networkName)

		for _, firewall := range resp.Items {
			op, err := config.clientCompute.Firewalls.Delete(projectId, firewall.Name).Do()
			if err != nil {
				return fmt.Errorf("Error deleting firewall: %s", err)
			}
			// Block until each delete operation finishes so the network has no
			// dependent rules left when deleteComputeNetwork runs below.
			err = computeSharedOperationWait(config.clientCompute, op, projectId, "Deleting Firewall")
			if err != nil {
				return err
			}
		}

		token = resp.NextPageToken
		paginate = token != ""
	}

	return deleteComputeNetwork(projectId, networkName, config)
}

// updateProjectBillingAccount links (or, with an empty name, unlinks) the
// billing account named in state to the project in d.Id().
func updateProjectBillingAccount(d *schema.ResourceData, config *Config) error {
	pid := d.Id()
	name := d.Get("billing_account").(string)
	ba := &cloudbilling.ProjectBillingInfo{}
	// If we're unlinking an existing billing account, an empty request does that, not an empty-string billing account.
+ if name != "" { + ba.BillingAccountName = "billingAccounts/" + name + } + _, err := config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), ba).Do() + if err != nil { + d.Set("billing_account", "") + if _err, ok := err.(*googleapi.Error); ok { + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), _err) + } + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), err) + } + for retries := 0; retries < 3; retries++ { + ba, err = config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() + if err != nil { + return err + } + baName := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") + if baName == name { + return nil + } + time.Sleep(3 * time.Second) + } + return fmt.Errorf("Timed out waiting for billing account to return correct value. Waiting for %s, got %s.", + name, strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/")) +} + +func expandAppEngineApp(d *schema.ResourceData) (*appengine.Application, error) { + blocks := d.Get("app_engine").([]interface{}) + if len(blocks) < 1 { + return nil, nil + } + if len(blocks) > 1 { + return nil, fmt.Errorf("only one app_engine block may be defined per project") + } + result := &appengine.Application{ + AuthDomain: d.Get("app_engine.0.auth_domain").(string), + LocationId: d.Get("app_engine.0.location_id").(string), + Id: d.Get("project_id").(string), + GcrDomain: d.Get("app_engine.0.gcr_domain").(string), + ServingStatus: d.Get("app_engine.0.serving_status").(string), + } + featureSettings, err := expandAppEngineFeatureSettings(d, "app_engine.0.") + if err != nil { + return nil, err + } + result.FeatureSettings = featureSettings + return result, nil +} + +func flattenAppEngineApp(app *appengine.Application) ([]map[string]interface{}, error) { + result := map[string]interface{}{ + "auth_domain": app.AuthDomain, + "code_bucket": app.CodeBucket, + "default_bucket": app.DefaultBucket, + 
"default_hostname": app.DefaultHostname,
		"location_id":      app.LocationId,
		"name":             app.Name,
		"serving_status":   app.ServingStatus,
	}
	dispatchRules, err := flattenAppEngineDispatchRules(app.DispatchRules)
	if err != nil {
		return nil, err
	}
	result["url_dispatch_rule"] = dispatchRules
	featureSettings, err := flattenAppEngineFeatureSettings(app.FeatureSettings)
	if err != nil {
		return nil, err
	}
	result["feature_settings"] = featureSettings
	return []map[string]interface{}{result}, nil
}

// expandAppEngineFeatureSettings reads the (at most one) feature_settings
// block under the given prefix from state and builds the API object.
// Returns (nil, nil) when no block is configured.
func expandAppEngineFeatureSettings(d *schema.ResourceData, prefix string) (*appengine.FeatureSettings, error) {
	blocks := d.Get(prefix + "feature_settings").([]interface{})
	if len(blocks) < 1 {
		return nil, nil
	}
	if len(blocks) > 1 {
		return nil, fmt.Errorf("only one feature_settings block may be defined per app")
	}
	return &appengine.FeatureSettings{
		SplitHealthChecks: d.Get(prefix + "feature_settings.0.split_health_checks").(bool),
		// force send SplitHealthChecks, so if it's set to false it still gets disabled
		ForceSendFields: []string{"SplitHealthChecks"},
	}, nil
}

// flattenAppEngineFeatureSettings converts the API FeatureSettings object
// into the zero-or-one-element list form the schema expects.
func flattenAppEngineFeatureSettings(settings *appengine.FeatureSettings) ([]map[string]interface{}, error) {
	if settings == nil {
		return []map[string]interface{}{}, nil
	}
	result := map[string]interface{}{
		"split_health_checks": settings.SplitHealthChecks,
	}
	return []map[string]interface{}{result}, nil
}

// flattenAppEngineDispatchRules converts API dispatch rules into the list of
// maps stored under url_dispatch_rule.
func flattenAppEngineDispatchRules(rules []*appengine.UrlDispatchRule) ([]map[string]interface{}, error) {
	results := make([]map[string]interface{}, 0, len(rules))
	for _, rule := range rules {
		results = append(results, map[string]interface{}{
			"domain":  rule.Domain,
			"path":    rule.Path,
			"service": rule.Service,
		})
	}
	return results, nil
}
diff --git a/provider/terraform/resources/resource_google_project_iam_custom_role.go b/provider/terraform/resources/resource_google_project_iam_custom_role.go
new file mode 100644 index
000000000000..959cd4b7e15f --- /dev/null +++ b/provider/terraform/resources/resource_google_project_iam_custom_role.go @@ -0,0 +1,225 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/iam/v1" +) + +func resourceGoogleProjectIamCustomRole() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectIamCustomRoleCreate, + Read: resourceGoogleProjectIamCustomRoleRead, + Update: resourceGoogleProjectIamCustomRoleUpdate, + Delete: resourceGoogleProjectIamCustomRoleDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "role_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "title": { + Type: schema.TypeString, + Required: true, + }, + "permissions": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "stage": { + Type: schema.TypeString, + Optional: true, + Default: "GA", + ValidateFunc: validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", "EAP"}, false), + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "deleted": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func resourceGoogleProjectIamCustomRoleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + if d.Get("deleted").(bool) { + return fmt.Errorf("Cannot create a custom project role with a deleted state. 
`deleted` field should be false.") + } + + roleId := fmt.Sprintf("projects/%s/roles/%s", project, d.Get("role_id").(string)) + r, err := config.clientIAM.Projects.Roles.Get(roleId).Do() + if err == nil { + if r.Deleted { + // This role was soft-deleted; update to match new state. + d.SetId(r.Name) + if err := resourceGoogleProjectIamCustomRoleUpdate(d, meta); err != nil { + // If update failed, make sure it wasn't actually added to state. + d.SetId("") + return err + } + } else { + // If a role with same name exists and is enabled, just return error + return fmt.Errorf("Custom project role %s already exists and must be imported", roleId) + } + } else if err := handleNotFoundError(err, d, fmt.Sprintf("Custom Project Role %q", roleId)); err == nil { + // If no role is found, actually create a new role. + role, err := config.clientIAM.Projects.Roles.Create("projects/"+project, &iam.CreateRoleRequest{ + RoleId: d.Get("role_id").(string), + Role: &iam.Role{ + Title: d.Get("title").(string), + Description: d.Get("description").(string), + Stage: d.Get("stage").(string), + IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)), + }, + }).Do() + if err != nil { + return fmt.Errorf("Error creating the custom project role %s: %v", roleId, err) + } + + d.SetId(role.Name) + } else { + return fmt.Errorf("Unable to verify whether custom project role %s already exists and must be undeleted: %v", roleId, err) + } + + return resourceGoogleProjectIamCustomRoleRead(d, meta) +} + +func resourceGoogleProjectIamCustomRoleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + role, err := config.clientIAM.Projects.Roles.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, d.Id()) + } + + d.Set("role_id", GetResourceNameFromSelfLink(role.Name)) + d.Set("title", role.Title) + d.Set("description", role.Description) + d.Set("permissions", 
role.IncludedPermissions) + d.Set("stage", role.Stage) + d.Set("deleted", role.Deleted) + d.Set("project", project) + + return nil +} + +func resourceGoogleProjectIamCustomRoleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.Get("deleted").(bool) { + if d.HasChange("deleted") { + // If other fields were changed, we need to update those first and then delete. + // If we don't update, we will get diffs from re-apply + // If we delete and then try to update, we will get an error. + if err := resourceGoogleProjectIamCustomRoleUpdateNonDeletedFields(d, meta); err != nil { + return err + } + if err := resourceGoogleProjectIamCustomRoleDelete(d, meta); err != nil { + return err + } + + d.SetPartial("deleted") + d.Partial(false) + return nil + } else { + return fmt.Errorf("cannot make changes to deleted custom project role %s", d.Id()) + } + } + + // We want to update the role to some undeleted state. + // Make sure the role with given ID exists and is un-deleted before patching. 
	r, err := config.clientIAM.Projects.Roles.Get(d.Id()).Do()
	if err != nil {
		return fmt.Errorf("unable to find custom project role %s to update: %v", d.Id(), err)
	}
	if r.Deleted {
		// Undelete if deleted previously
		if err := resourceGoogleProjectIamCustomRoleUndelete(d, meta); err != nil {
			return err
		}
		d.SetPartial("deleted")
	}

	if err := resourceGoogleProjectIamCustomRoleUpdateNonDeletedFields(d, meta); err != nil {
		return err
	}
	d.Partial(false)

	return nil
}

// resourceGoogleProjectIamCustomRoleUpdateNonDeletedFields patches the
// mutable role fields (title, description, stage, permissions) in a single
// API call, but only when at least one of them has changed.
func resourceGoogleProjectIamCustomRoleUpdateNonDeletedFields(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	if d.HasChange("title") || d.HasChange("description") || d.HasChange("stage") || d.HasChange("permissions") {
		_, err := config.clientIAM.Projects.Roles.Patch(d.Id(), &iam.Role{
			Title:               d.Get("title").(string),
			Description:         d.Get("description").(string),
			Stage:               d.Get("stage").(string),
			IncludedPermissions: convertStringSet(d.Get("permissions").(*schema.Set)),
		}).Do()

		if err != nil {
			return fmt.Errorf("Error updating the custom project role %s: %s", d.Get("title").(string), err)
		}
		d.SetPartial("title")
		d.SetPartial("description")
		d.SetPartial("stage")
		d.SetPartial("permissions")
	}
	return nil
}

// resourceGoogleProjectIamCustomRoleDelete soft-deletes the role; IAM keeps
// deleted roles recoverable via Undelete, which Create above relies on.
func resourceGoogleProjectIamCustomRoleDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	_, err := config.clientIAM.Projects.Roles.Delete(d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting the custom project role %s: %s", d.Get("title").(string), err)
	}

	return nil
}

// resourceGoogleProjectIamCustomRoleUndelete restores a previously
// soft-deleted custom role identified by d.Id().
func resourceGoogleProjectIamCustomRoleUndelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	_, err := config.clientIAM.Projects.Roles.Undelete(d.Id(), &iam.UndeleteRoleRequest{}).Do()
	if err != nil {
		return fmt.Errorf("Error undeleting the custom project role %s: %s", d.Get("title").(string), err)
	}

	return nil
}
diff --git
a/provider/terraform/resources/resource_google_project_iam_policy.go b/provider/terraform/resources/resource_google_project_iam_policy.go new file mode 100644 index 000000000000..8cfc8f4ce047 --- /dev/null +++ b/provider/terraform/resources/resource_google_project_iam_policy.go @@ -0,0 +1,425 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "sort" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func resourceGoogleProjectIamPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectIamPolicyCreate, + Read: resourceGoogleProjectIamPolicyRead, + Update: resourceGoogleProjectIamPolicyUpdate, + Delete: resourceGoogleProjectIamPolicyDelete, + Importer: &schema.ResourceImporter{ + State: resourceGoogleProjectIamPolicyImport, + }, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: jsonPolicyDiffSuppress, + }, + "authoritative": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Deprecated: "A future version of Terraform will remove the authoritative field. To ignore changes not managed by Terraform, use google_project_iam_binding and google_project_iam_member instead. See https://www.terraform.io/docs/providers/google/r/google_project_iam.html for more information.", + }, + "etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "restore_policy": &schema.Schema{ + Deprecated: "This field will be removed alongside the authoritative field. To ignore changes not managed by Terraform, use google_project_iam_binding and google_project_iam_member instead. 
See https://www.terraform.io/docs/providers/google/r/google_project_iam.html for more information.", + Type: schema.TypeString, + Computed: true, + }, + "disable_project": &schema.Schema{ + Deprecated: "This will be removed with the authoritative field. Use lifecycle.prevent_destroy instead.", + Type: schema.TypeBool, + Optional: true, + }, + }, + } +} + +func resourceGoogleProjectIamPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid, err := getProject(d, config) + if err != nil { + return err + } + + mutexKey := getProjectIamPolicyMutexKey(pid) + mutexKV.Lock(mutexKey) + defer mutexKV.Unlock(mutexKey) + + // Get the policy in the template + p, err := getResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) + } + + // An authoritative policy is applied without regard for any existing IAM + // policy. + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + log.Printf("[DEBUG] Setting authoritative IAM policy for project %q", pid) + err := setProjectIamPolicy(p, config, pid) + if err != nil { + return err + } + } else { + log.Printf("[DEBUG] Setting non-authoritative IAM policy for project %q", pid) + // This is a non-authoritative policy, meaning it should be merged with + // any existing policy + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return err + } + + // First, subtract the policy defined in the template from the + // current policy in the project, and save the result. This will + // allow us to restore the original policy at some point (which + // assumes that Terraform owns any common policy that exists in + // the template and project at create time. 
+ rp := subtractIamPolicy(ep, p) + rps, err := json.Marshal(rp) + if err != nil { + return fmt.Errorf("Error marshaling restorable IAM policy: %v", err) + } + d.Set("restore_policy", string(rps)) + + // Merge the policies together + mb := mergeBindings(append(p.Bindings, rp.Bindings...)) + ep.Bindings = mb + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + } + d.SetId(pid) + return resourceGoogleProjectIamPolicyRead(d, meta) +} + +func resourceGoogleProjectIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Reading google_project_iam_policy") + config := meta.(*Config) + pid, err := getProject(d, config) + if err != nil { + return err + } + + p, err := getProjectIamPolicy(pid, config) + if err != nil { + return err + } + + var bindings []*cloudresourcemanager.Binding + if v, ok := d.GetOk("restore_policy"); ok { + var restored cloudresourcemanager.Policy + // if there's a restore policy, subtract it from the policy_data + err := json.Unmarshal([]byte(v.(string)), &restored) + if err != nil { + return fmt.Errorf("Error unmarshaling restorable IAM policy: %v", err) + } + subtracted := subtractIamPolicy(p, &restored) + bindings = subtracted.Bindings + } else { + bindings = p.Bindings + } + // we only marshal the bindings, because only the bindings get set in the config + pBytes, err := json.Marshal(&cloudresourcemanager.Policy{Bindings: bindings}) + if err != nil { + return fmt.Errorf("Error marshaling IAM policy: %v", err) + } + log.Printf("[DEBUG]: Setting etag=%s", p.Etag) + d.Set("etag", p.Etag) + d.Set("policy_data", string(pBytes)) + d.Set("project", pid) + return nil +} + +func resourceGoogleProjectIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_project_iam_policy") + config := meta.(*Config) + pid, err := getProject(d, config) + if err != nil { + return err + } + + mutexKey := 
getProjectIamPolicyMutexKey(pid) + mutexKV.Lock(mutexKey) + defer mutexKV.Unlock(mutexKey) + + // Get the policy in the template + p, err := getResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) + } + pBytes, _ := json.Marshal(p) + log.Printf("[DEBUG] Got policy from config: %s", string(pBytes)) + + // An authoritative policy is applied without regard for any existing IAM + // policy. + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + log.Printf("[DEBUG] Updating authoritative IAM policy for project %q", pid) + err := setProjectIamPolicy(p, config, pid) + if err != nil { + return fmt.Errorf("Error setting project IAM policy: %v", err) + } + d.Set("restore_policy", "") + } else { + log.Printf("[DEBUG] Updating non-authoritative IAM policy for project %q", pid) + // Get the previous policy from state + pp, err := getPrevResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) + } + ppBytes, _ := json.Marshal(pp) + log.Printf("[DEBUG] Got previous version of changed project IAM policy: %s", string(ppBytes)) + + // Get the existing IAM policy from the API + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) + } + epBytes, _ := json.Marshal(ep) + log.Printf("[DEBUG] Got existing version of changed IAM policy from project API: %s", string(epBytes)) + + // Subtract the previous and current policies from the policy retrieved from the API + rp := subtractIamPolicy(ep, pp) + rpBytes, _ := json.Marshal(rp) + log.Printf("[DEBUG] After subtracting the previous policy from the existing policy, remaining policies: %s", string(rpBytes)) + rp = subtractIamPolicy(rp, p) + rpBytes, _ = json.Marshal(rp) + log.Printf("[DEBUG] After subtracting the remaining policies from the config policy, remaining policies: %s", string(rpBytes)) + rps, err := 
json.Marshal(rp) + if err != nil { + return fmt.Errorf("Error marhsaling restorable IAM policy: %v", err) + } + d.Set("restore_policy", string(rps)) + + // Merge the policies together + mb := mergeBindings(append(p.Bindings, rp.Bindings...)) + ep.Bindings = mb + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + } + + return resourceGoogleProjectIamPolicyRead(d, meta) +} + +func resourceGoogleProjectIamPolicyDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Deleting google_project_iam_policy") + config := meta.(*Config) + pid, err := getProject(d, config) + if err != nil { + return err + } + + mutexKey := getProjectIamPolicyMutexKey(pid) + mutexKV.Lock(mutexKey) + defer mutexKV.Unlock(mutexKey) + + // Get the existing IAM policy from the API + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) + } + // Deleting an authoritative policy will leave the project with no policy, + // and unaccessible by anyone without org-level privs. 
For this reason, the + // "disable_project" property must be set to true, forcing the user to ack + // this outcome + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + if v, ok := d.GetOk("disable_project"); !ok || !v.(bool) { + return fmt.Errorf("You must set 'disable_project' to true before deleting an authoritative IAM policy") + } + ep.Bindings = make([]*cloudresourcemanager.Binding, 0) + + } else { + // A non-authoritative policy should set the policy to the value of "restore_policy" in state + // Get the previous policy from state + rp, err := getRestoreIamPolicy(d) + if err != nil { + return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) + } + ep.Bindings = rp.Bindings + } + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + d.SetId("") + return nil +} + +func resourceGoogleProjectIamPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("project", d.Id()) + return []*schema.ResourceData{d}, nil +} + +// Subtract all bindings in policy b from policy a, and return the result +func subtractIamPolicy(a, b *cloudresourcemanager.Policy) *cloudresourcemanager.Policy { + am := rolesToMembersMap(a.Bindings) + + for _, b := range b.Bindings { + if _, ok := am[b.Role]; ok { + for _, m := range b.Members { + delete(am[b.Role], m) + } + if len(am[b.Role]) == 0 { + delete(am, b.Role) + } + } + } + a.Bindings = rolesToMembersBinding(am) + return a +} + +func setProjectIamPolicy(policy *cloudresourcemanager.Policy, config *Config, pid string) error { + // Apply the policy + pbytes, _ := json.Marshal(policy) + log.Printf("[DEBUG] Setting policy %#v for project: %s", string(pbytes), pid) + _, err := config.clientResourceManager.Projects.SetIamPolicy(pid, + &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error applying IAM policy for 
project %q. Policy is %#v, error is {{err}}", pid, policy), err) + } + return nil +} + +// Get a cloudresourcemanager.Policy from a schema.ResourceData +func getResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + ps := d.Get("policy_data").(string) + // The policy string is just a marshaled cloudresourcemanager.Policy. + policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(ps), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + return policy, nil +} + +// Get the previous cloudresourcemanager.Policy from a schema.ResourceData if the +// resource has changed +func getPrevResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + var policy *cloudresourcemanager.Policy = &cloudresourcemanager.Policy{} + if d.HasChange("policy_data") { + v, _ := d.GetChange("policy_data") + if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) + } + } + return policy, nil +} + +// Get the restore_policy that can be used to restore a project's IAM policy to its +// state before it was adopted into Terraform +func getRestoreIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + if v, ok := d.GetOk("restore_policy"); ok { + policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) + } + return policy, nil + } + return nil, fmt.Errorf("Resource does not have a 'restore_policy' attribute defined.") +} + +// Retrieve the existing IAM Policy for a Project +func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { + p, err := config.clientResourceManager.Projects.GetIamPolicy(project, + &cloudresourcemanager.GetIamPolicyRequest{}).Do() + + if err != nil { + return nil, fmt.Errorf("Error 
retrieving IAM policy for project %q: %s", project, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding { + bindings := make([]*cloudresourcemanager.Binding, 0) + for role, members := range m { + b := cloudresourcemanager.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + var oldPolicy, newPolicy cloudresourcemanager.Policy + if err := json.Unmarshal([]byte(old), &oldPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err) + return false + } + if err := json.Unmarshal([]byte(new), &newPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) + return false + } + oldPolicy.Bindings = mergeBindings(oldPolicy.Bindings) + newPolicy.Bindings = mergeBindings(newPolicy.Bindings) + if newPolicy.Etag != oldPolicy.Etag { + return false + } + if newPolicy.Version != oldPolicy.Version { + return false + } + if len(newPolicy.Bindings) != len(oldPolicy.Bindings) { + return false + } + sort.Sort(sortableBindings(newPolicy.Bindings)) + sort.Sort(sortableBindings(oldPolicy.Bindings)) + for pos, newBinding := range newPolicy.Bindings { + oldBinding := oldPolicy.Bindings[pos] + if oldBinding.Role != newBinding.Role { + return false + } + if len(oldBinding.Members) != len(newBinding.Members) { + return false + } + sort.Strings(oldBinding.Members) + sort.Strings(newBinding.Members) + for i, newMember := range newBinding.Members { + oldMember := oldBinding.Members[i] + if newMember != oldMember { + return false + } + } + } + return true +} + +type sortableBindings []*cloudresourcemanager.Binding + +func (b sortableBindings) Len() int { + return len(b) +} +func (b 
sortableBindings) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} +func (b sortableBindings) Less(i, j int) bool { + return b[i].Role < b[j].Role +} + +func getProjectIamPolicyMutexKey(pid string) string { + return fmt.Sprintf("iam-project-%s", pid) +} diff --git a/provider/terraform/resources/resource_google_project_migrate.go b/provider/terraform/resources/resource_google_project_migrate.go new file mode 100644 index 000000000000..09fccd31186b --- /dev/null +++ b/provider/terraform/resources/resource_google_project_migrate.go @@ -0,0 +1,47 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if s.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return s, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Google Project State v0; migrating to v1") + s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config)) + if err != nil { + return s, err + } + return s, nil + default: + return s, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +// This migration adjusts google_project resources to include several additional attributes +// required to support project creation/deletion that was added in V1. 
+func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *Config) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) + + s.Attributes["skip_delete"] = "true" + s.Attributes["project_id"] = s.ID + + if s.Attributes["policy_data"] != "" { + p, err := getProjectIamPolicy(s.ID, config) + if err != nil { + return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err) + } + s.Attributes["policy_etag"] = p.Etag + } + + log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) + return s, nil +} diff --git a/provider/terraform/resources/resource_google_project_organization_policy.go b/provider/terraform/resources/resource_google_project_organization_policy.go new file mode 100644 index 000000000000..ca497d5d2741 --- /dev/null +++ b/provider/terraform/resources/resource_google_project_organization_policy.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func resourceGoogleProjectOrganizationPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectOrganizationPolicyCreate, + Read: resourceGoogleProjectOrganizationPolicyRead, + Update: resourceGoogleProjectOrganizationPolicyUpdate, + Delete: resourceGoogleProjectOrganizationPolicyDelete, + + Schema: mergeSchemas( + schemaOrganizationPolicy, + map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + ), + } +} + +func resourceGoogleProjectOrganizationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + if err := setProjectOrganizationPolicy(d, meta); err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) + + return resourceGoogleProjectOrganizationPolicyRead(d, meta) +} + +func resourceGoogleProjectOrganizationPolicyRead(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project := prefixedProject(d.Get("project").(string)) + + policy, err := config.clientResourceManager.Projects.GetOrgPolicy(project, &cloudresourcemanager.GetOrgPolicyRequest{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Organization policy for %s", project)) + } + + d.Set("constraint", policy.Constraint) + d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)) + d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)) + d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)) + d.Set("version", policy.Version) + d.Set("etag", policy.Etag) + d.Set("update_time", policy.UpdateTime) + + return nil +} + +func resourceGoogleProjectOrganizationPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + if err := setProjectOrganizationPolicy(d, meta); err != nil { + return err + } + + return resourceGoogleProjectOrganizationPolicyRead(d, meta) +} + +func resourceGoogleProjectOrganizationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project := prefixedProject(d.Get("project").(string)) + + _, err := config.clientResourceManager.Projects.ClearOrgPolicy(project, &cloudresourcemanager.ClearOrgPolicyRequest{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + }).Do() + + if err != nil { + return err + } + + return nil +} + +func setProjectOrganizationPolicy(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project := prefixedProject(d.Get("project").(string)) + + listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) + if err != nil { + return err + } + + restore_default, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) + if err != nil { + return err + } + + _, err = 
config.clientResourceManager.Projects.SetOrgPolicy(project, &cloudresourcemanager.SetOrgPolicyRequest{ + Policy: &cloudresourcemanager.OrgPolicy{ + Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), + BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), + ListPolicy: listPolicy, + RestoreDefault: restore_default, + Version: int64(d.Get("version").(int)), + Etag: d.Get("etag").(string), + }, + }).Do() + + return err +} diff --git a/provider/terraform/resources/resource_google_project_service.go b/provider/terraform/resources/resource_google_project_service.go new file mode 100644 index 000000000000..d87db7cdd00a --- /dev/null +++ b/provider/terraform/resources/resource_google_project_service.go @@ -0,0 +1,156 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceGoogleProjectService() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectServiceCreate, + Read: resourceGoogleProjectServiceRead, + Delete: resourceGoogleProjectServiceDelete, + Update: resourceGoogleProjectServiceUpdate, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "disable_on_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + } +} + +func resourceGoogleProjectServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + srv := d.Get("service").(string) + + if err = enableService(srv, project, config); err != nil { + return errwrap.Wrapf("Error enabling service: {{err}}", err) + } + + 
d.SetId(projectServiceId{project, srv}.terraformId()) + return resourceGoogleProjectServiceRead(d, meta) +} + +func resourceGoogleProjectServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, err := parseProjectServiceId(d.Id()) + if err != nil { + return err + } + + project, err := config.clientResourceManager.Projects.Get(id.project).Do() + if err != nil { + return handleNotFoundError(err, d, id.project) + } + if project.LifecycleState == "DELETE_REQUESTED" { + log.Printf("[WARN] Removing %s from state, its project is deleted", id.terraformId()) + d.SetId("") + return nil + } + + services, err := getApiServices(id.project, config, map[string]struct{}{}) + if err != nil { + return err + } + + d.Set("project", id.project) + + for _, s := range services { + if s == id.service { + d.Set("service", s) + return nil + } + } + + // The service is not enabled server-side, so remove it from state + d.SetId("") + return nil +} + +func resourceGoogleProjectServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { + log.Printf("Not disabling service '%s', because disable_on_destroy is false.", d.Id()) + d.SetId("") + return nil + } + + id, err := parseProjectServiceId(d.Id()) + if err != nil { + return err + } + + project, err := config.clientResourceManager.Projects.Get(id.project).Do() + if err != nil { + return handleNotFoundError(err, d, id.project) + } + if project.LifecycleState == "DELETE_REQUESTED" { + log.Printf("[WARN] Removing %s from state, its project is deleted", id.terraformId()) + d.SetId("") + return nil + } + + if err = disableService(id.service, id.project, config); err != nil { + return fmt.Errorf("Error disabling service: %s", err) + } + + d.SetId("") + return nil +} + +func resourceGoogleProjectServiceUpdate(d *schema.ResourceData, meta interface{}) error { + // The only thing that can be updated without a ForceNew 
is whether to disable the service on resource delete. + // This doesn't require any calls to any APIs since it's all internal state. + // This update is a no-op. + return nil +} + +// Parts that make up the id of a `google_project_service` resource. +// Project is included in order to allow multiple projects to enable the same service within the same Terraform state +type projectServiceId struct { + project string + service string +} + +func (id projectServiceId) terraformId() string { + return fmt.Sprintf("%s/%s", id.project, id.service) +} + +func parseProjectServiceId(id string) (*projectServiceId, error) { + parts := strings.Split(id, "/") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid google_project_service id format, expecting `{project}/{service}`, found %s", id) + } + + return &projectServiceId{parts[0], parts[1]}, nil +} diff --git a/provider/terraform/resources/resource_google_project_services.go b/provider/terraform/resources/resource_google_project_services.go new file mode 100644 index 000000000000..aecf2ab18003 --- /dev/null +++ b/provider/terraform/resources/resource_google_project_services.go @@ -0,0 +1,367 @@ +package google + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" + "google.golang.org/api/serviceusage/v1beta1" +) + +func resourceGoogleProjectServices() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectServicesCreate, + Read: resourceGoogleProjectServicesRead, + Update: resourceGoogleProjectServicesUpdate, + Delete: resourceGoogleProjectServicesDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "services": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: 
schema.TypeString}, + Set: schema.HashString, + }, + "disable_on_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + } +} + +// These services can only be enabled as a side-effect of enabling other services, +// so don't bother storing them in the config or using them for diffing. +var ignoreProjectServices = map[string]struct{}{ + "dataproc-control.googleapis.com": struct{}{}, + "source.googleapis.com": struct{}{}, + "stackdriverprovisioning.googleapis.com": struct{}{}, +} + +func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + pid, err := getProject(d, config) + if err != nil { + return err + } + + // Get services from config + cfgServices := getConfigServices(d) + + // Get services from API + apiServices, err := getApiServices(pid, config, ignoreProjectServices) + if err != nil { + return fmt.Errorf("Error creating services: %v", err) + } + + // This call disables any APIs that aren't defined in cfgServices, + // and enables all of those that are + err = reconcileServices(cfgServices, apiServices, config, pid) + if err != nil { + return fmt.Errorf("Error creating services: %v", err) + } + + d.SetId(pid) + return resourceGoogleProjectServicesRead(d, meta) +} + +func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + services, err := getApiServices(d.Id(), config, ignoreProjectServices) + if err != nil { + return err + } + + d.Set("project", d.Id()) + d.Set("services", services) + return nil +} + +func resourceGoogleProjectServicesUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_project_services") + config := meta.(*Config) + + // Get services from config + cfgServices := getConfigServices(d) + + // Get services from API + apiServices, err := getApiServices(d.Id(), config, ignoreProjectServices) + if err != nil { + return fmt.Errorf("Error 
updating services: %v", err) + } + + // This call disables any APIs that aren't defined in cfgServices, + // and enables all of those that are + err = reconcileServices(cfgServices, apiServices, config, d.Id()) + if err != nil { + return fmt.Errorf("Error updating services: %v", err) + } + + return resourceGoogleProjectServicesRead(d, meta) +} + +func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Deleting google_project_services") + + if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { + log.Printf("Not disabling service '%s', because disable_on_destroy is false.", d.Id()) + d.SetId("") + return nil + } + + config := meta.(*Config) + services := resourceServices(d) + for _, s := range services { + disableService(s, d.Id(), config) + } + d.SetId("") + return nil +} + +// This function ensures that the services enabled for a project exactly match that +// in a config by disabling any services that are returned by the API but not present +// in the config +func reconcileServices(cfgServices, apiServices []string, config *Config, pid string) error { + // Helper to convert slice to map + m := func(vals []string) map[string]struct{} { + sm := make(map[string]struct{}) + for _, s := range vals { + sm[s] = struct{}{} + } + return sm + } + + cfgMap := m(cfgServices) + apiMap := m(apiServices) + + for k, _ := range apiMap { + if _, ok := cfgMap[k]; !ok { + // The service in the API is not in the config; disable it. 
+ err := disableService(k, pid, config) + if err != nil { + return err + } + } else { + // The service exists in the config and the API, so we don't need + // to re-enable it + delete(cfgMap, k) + } + } + + keys := make([]string, 0, len(cfgMap)) + for k, _ := range cfgMap { + keys = append(keys, k) + } + err := enableServices(keys, pid, config) + if err != nil { + return err + } + return nil +} + +// Retrieve services defined in a config +func getConfigServices(d *schema.ResourceData) (services []string) { + if v, ok := d.GetOk("services"); ok { + for _, svc := range v.(*schema.Set).List() { + services = append(services, svc.(string)) + } + } + return +} + +// Retrieve a project's services from the API +func getApiServices(pid string, config *Config, ignore map[string]struct{}) ([]string, error) { + if ignore == nil { + ignore = make(map[string]struct{}) + } + + var apiServices []string + + if err := retryTime(func() error { + // Reset the list of apiServices in case of a retry. A partial page failure + // could result in duplicate services. + apiServices = make([]string, 0, 10) + + ctx := context.Background() + return config.clientServiceUsage.Services. + List("projects/"+pid). + Fields("services/name,nextPageToken"). + Filter("state:ENABLED"). 
+ Pages(ctx, func(r *serviceusage.ListServicesResponse) error { + for _, v := range r.Services { + // services are returned as "projects/PROJECT/services/NAME" + parts := strings.Split(v.Name, "/") + if len(parts) > 0 { + name := parts[len(parts)-1] + if _, ok := ignore[name]; !ok { + apiServices = append(apiServices, name) + } + } + } + + return nil + }) + }, 10); err != nil { + return nil, errwrap.Wrapf("failed to list services: {{err}}", err) + } + + return apiServices, nil +} + +func enableService(s, pid string, config *Config) error { + return enableServices([]string{s}, pid, config) +} + +func enableServices(s []string, pid string, config *Config) error { + // It's not permitted to enable more than 20 services in one API call (even + // for batch). + // + // https://godoc.org/google.golang.org/api/serviceusage/v1beta1#BatchEnableServicesRequest + batchSize := 20 + + for i := 0; i < len(s); i += batchSize { + j := i + batchSize + if j > len(s) { + j = len(s) + } + + services := s[i:j] + + if err := retryTime(func() error { + var sop *serviceusage.Operation + var err error + + if len(services) < 1 { + // No more services to enable + return nil + } else if len(services) == 1 { + // Use the singular enable - can't use batch for a single item + name := fmt.Sprintf("projects/%s/services/%s", pid, services[0]) + req := &serviceusage.EnableServiceRequest{} + sop, err = config.clientServiceUsage.Services.Enable(name, req).Do() + } else { + // Batch enable 2+ services + name := fmt.Sprintf("projects/%s", pid) + req := &serviceusage.BatchEnableServicesRequest{ServiceIds: services} + sop, err = config.clientServiceUsage.Services.BatchEnable(name, req).Do() + } + if err != nil { + // Check for a "precondition failed" error. The API seems to randomly + // (although more than 50%) return this error when enabling certain + // APIs. It's transient, so we catch it and re-raise it as an error that + // is retryable instead. 
+ if gerr, ok := err.(*googleapi.Error); ok { + if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." { + return &googleapi.Error{ + Code: 503, + Message: "api returned \"precondition failed\" while enabling service", + } + } + } + return errwrap.Wrapf("failed to issue request: {{err}}", err) + } + + // Poll for the API to return + activity := fmt.Sprintf("apis %q to be enabled for %s", services, pid) + _, waitErr := serviceUsageOperationWait(config, sop, activity) + if waitErr != nil { + return waitErr + } + + // Accumulate the list of services that are enabled on the project + enabledServices, err := getApiServices(pid, config, nil) + if err != nil { + return err + } + + // Diff the list of requested services to enable against the list of + // services on the project. + missing := diffStringSlice(services, enabledServices) + + // If there are any missing, force a retry + if len(missing) > 0 { + // Spoof a googleapi Error so retryTime will try again + return &googleapi.Error{ + Code: 503, + Message: fmt.Sprintf("The service(s) %q are still being enabled for project %s. 
This isn't a real API error, this is just eventual consistency.", missing, pid), + } + } + + return nil + }, 10); err != nil { + return errwrap.Wrap(err, fmt.Errorf("failed to enable service(s) %q for project %s", services, pid)) + } + } + + return nil +} + +func diffStringSlice(wanted, actual []string) []string { + var missing []string + + for _, want := range wanted { + found := false + + for _, act := range actual { + if want == act { + found = true + break + } + } + + if !found { + missing = append(missing, want) + } + } + + return missing +} + +func disableService(s, pid string, config *Config) error { + err := retryTime(func() error { + name := fmt.Sprintf("projects/%s/services/%s", pid, s) + sop, err := config.clientServiceUsage.Services.Disable(name, &serviceusage.DisableServiceRequest{}).Do() + if err != nil { + return err + } + // Wait for the operation to complete + _, waitErr := serviceUsageOperationWait(config, sop, "api to disable") + if waitErr != nil { + return waitErr + } + return nil + }, 10) + if err != nil { + return fmt.Errorf("Error disabling service %q for project %q: %v", s, pid, err) + } + return nil +} + +func resourceServices(d *schema.ResourceData) []string { + // Calculate the tags + var services []string + if s := d.Get("services"); s != nil { + ss := s.(*schema.Set) + services = make([]string, ss.Len()) + for i, v := range ss.List() { + services[i] = v.(string) + } + } + return services +} diff --git a/provider/terraform/resources/resource_google_service_account.go b/provider/terraform/resources/resource_google_service_account.go new file mode 100644 index 000000000000..14cb347a92cb --- /dev/null +++ b/provider/terraform/resources/resource_google_service_account.go @@ -0,0 +1,196 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/iam/v1" +) + +func resourceGoogleServiceAccount() *schema.Resource { + return &schema.Resource{ + Create: 
resourceGoogleServiceAccountCreate, + Read: resourceGoogleServiceAccountRead, + Delete: resourceGoogleServiceAccountDelete, + Update: resourceGoogleServiceAccountUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateRFC1035Name(6, 30), + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Removed: "Use the 'google_service_account_iam_policy' resource to define policies for a service account", + }, + }, + } +} + +func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + aid := d.Get("account_id").(string) + displayName := d.Get("display_name").(string) + + sa := &iam.ServiceAccount{ + DisplayName: displayName, + } + + r := &iam.CreateServiceAccountRequest{ + AccountId: aid, + ServiceAccount: sa, + } + + sa, err = config.clientIAM.Projects.ServiceAccounts.Create("projects/"+project, r).Do() + if err != nil { + return fmt.Errorf("Error creating service account: %s", err) + } + + d.SetId(sa.Name) + + return resourceGoogleServiceAccountRead(d, meta) +} + +func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Confirm the service account exists + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() + if err != 
nil { + return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) + } + + d.Set("email", sa.Email) + d.Set("unique_id", sa.UniqueId) + d.Set("project", sa.ProjectId) + d.Set("account_id", strings.Split(sa.Email, "@")[0]) + d.Set("name", sa.Name) + d.Set("display_name", sa.DisplayName) + return nil +} + +func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + name := d.Id() + _, err := config.clientIAM.Projects.ServiceAccounts.Delete(name).Do() + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + if ok := d.HasChange("display_name"); ok { + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() + if err != nil { + return fmt.Errorf("Error retrieving service account %q: %s", d.Id(), err) + } + _, err = config.clientIAM.Projects.ServiceAccounts.Update(d.Id(), + &iam.ServiceAccount{ + DisplayName: d.Get("display_name").(string), + Etag: sa.Etag, + }).Do() + if err != nil { + return fmt.Errorf("Error updating service account %q: %s", d.Id(), err) + } + } + + return nil +} + +// Retrieve the existing IAM Policy for a service account +func getServiceAccountIamPolicy(sa string, config *Config) (*iam.Policy, error) { + p, err := config.clientIAM.Projects.ServiceAccounts.GetIamPolicy(sa).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for service account %q: %s", sa, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func saRolesToMembersBinding(m map[string]map[string]bool) []*iam.Binding { + bindings := make([]*iam.Binding, 0) + for role, members := range m { + b := iam.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +// Map a role to a map 
of members, allowing easy merging of multiple bindings. +func saRolesToMembersMap(bindings []*iam.Binding) map[string]map[string]bool { + bm := make(map[string]map[string]bool) + // Get each binding + for _, b := range bindings { + // Initialize members map + if _, ok := bm[b.Role]; !ok { + bm[b.Role] = make(map[string]bool) + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + // Add the member + bm[b.Role][m] = true + } + } + return bm +} + +// Merge multiple Bindings such that Bindings with the same Role result in +// a single Binding with combined Members +func saMergeBindings(bindings []*iam.Binding) []*iam.Binding { + bm := saRolesToMembersMap(bindings) + rb := make([]*iam.Binding, 0) + + for role, members := range bm { + var b iam.Binding + b.Role = role + b.Members = make([]string, 0) + for m, _ := range members { + b.Members = append(b.Members, m) + } + rb = append(rb, &b) + } + + return rb +} diff --git a/provider/terraform/resources/resource_google_service_account_key.go b/provider/terraform/resources/resource_google_service_account_key.go new file mode 100644 index 000000000000..92be5a23f60c --- /dev/null +++ b/provider/terraform/resources/resource_google_service_account_key.go @@ -0,0 +1,184 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/encryption" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/iam/v1" +) + +func resourceGoogleServiceAccountKey() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleServiceAccountKeyCreate, + Read: resourceGoogleServiceAccountKeyRead, + Delete: resourceGoogleServiceAccountKeyDelete, + Schema: map[string]*schema.Schema{ + // Required + "service_account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + // Optional + "key_algorithm": &schema.Schema{ + Type: schema.TypeString, + Default: "KEY_ALG_RSA_2048", 
+ Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"KEY_ALG_UNSPECIFIED", "KEY_ALG_RSA_1024", "KEY_ALG_RSA_2048"}, false), + }, + "pgp_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "private_key_type": &schema.Schema{ + Type: schema.TypeString, + Default: "TYPE_GOOGLE_CREDENTIALS_FILE", + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TYPE_UNSPECIFIED", "TYPE_PKCS12_FILE", "TYPE_GOOGLE_CREDENTIALS_FILE"}, false), + }, + "public_key_type": &schema.Schema{ + Type: schema.TypeString, + Default: "TYPE_X509_PEM_FILE", + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY"}, false), + }, + // Computed + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + ForceNew: true, + }, + "public_key": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + }, + "private_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "valid_after": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "valid_before": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "private_key_encrypted": { + Type: schema.TypeString, + Computed: true, + }, + "private_key_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceGoogleServiceAccountKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + serviceAccountName, err := serviceAccountFQN(d.Get("service_account_id").(string), d, config) + if err != nil { + return err + } + + r := &iam.CreateServiceAccountKeyRequest{ + KeyAlgorithm: d.Get("key_algorithm").(string), + PrivateKeyType: d.Get("private_key_type").(string), + } + + sak, err := config.clientIAM.Projects.ServiceAccounts.Keys.Create(serviceAccountName, r).Do() + if err != nil { + return fmt.Errorf("Error creating service account key: 
%s", err) + } + + d.SetId(sak.Name) + // Data only available on create. + d.Set("valid_after", sak.ValidAfterTime) + d.Set("valid_before", sak.ValidBeforeTime) + if v, ok := d.GetOk("pgp_key"); ok { + encryptionKey, err := encryption.RetrieveGPGKey(v.(string)) + if err != nil { + return err + } + + fingerprint, encrypted, err := encryption.EncryptValue(encryptionKey, sak.PrivateKeyData, "Google Service Account Key") + if err != nil { + return err + } + + d.Set("private_key_encrypted", encrypted) + d.Set("private_key_fingerprint", fingerprint) + } else { + d.Set("private_key", sak.PrivateKeyData) + } + + err = serviceAccountKeyWaitTime(config.clientIAM.Projects.ServiceAccounts.Keys, d.Id(), d.Get("public_key_type").(string), "Creating Service account key", 4) + if err != nil { + return err + } + return resourceGoogleServiceAccountKeyRead(d, meta) +} + +func resourceGoogleServiceAccountKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + publicKeyType := d.Get("public_key_type").(string) + + // Confirm the service account key exists + sak, err := config.clientIAM.Projects.ServiceAccounts.Keys.Get(d.Id()).PublicKeyType(publicKeyType).Do() + if err != nil { + if err = handleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { + return nil + } else { + // This resource also returns 403 when it's not found. 
+ if isGoogleApiErrorWithCode(err, 403) { + log.Printf("[DEBUG] Got a 403 error trying to read service account key %s, assuming it's gone.", d.Id()) + d.SetId("") + return nil + } else { + return err + } + } + } + + d.Set("name", sak.Name) + d.Set("key_algorithm", sak.KeyAlgorithm) + d.Set("public_key", sak.PublicKeyData) + return nil +} + +func resourceGoogleServiceAccountKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientIAM.Projects.ServiceAccounts.Keys.Delete(d.Id()).Do() + + if err != nil { + if err = handleNotFoundError(err, d, fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { + return nil + } else { + // This resource also returns 403 when it's not found. + if isGoogleApiErrorWithCode(err, 403) { + log.Printf("[DEBUG] Got a 403 error trying to read service account key %s, assuming it's gone.", d.Id()) + d.SetId("") + return nil + } else { + return err + } + } + } + + d.SetId("") + return nil +} diff --git a/provider/terraform/resources/resource_iam_binding.go b/provider/terraform/resources/resource_iam_binding.go new file mode 100644 index 000000000000..4c1a12f7b4c9 --- /dev/null +++ b/provider/terraform/resources/resource_iam_binding.go @@ -0,0 +1,230 @@ +package google + +import ( + "errors" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var iamBindingSchema = map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "members": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, +} + +func ResourceIamBinding(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc) *schema.Resource { + return &schema.Resource{ + Create: resourceIamBindingCreate(newUpdaterFunc), + Read: 
resourceIamBindingRead(newUpdaterFunc), + Update: resourceIamBindingUpdate(newUpdaterFunc), + Delete: resourceIamBindingDelete(newUpdaterFunc), + Schema: mergeSchemas(iamBindingSchema, parentSpecificSchema), + } +} + +func ResourceIamBindingWithImport(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) *schema.Resource { + r := ResourceIamBinding(parentSpecificSchema, newUpdaterFunc) + r.Importer = &schema.ResourceImporter{ + State: iamBindingImport(resourceIdParser), + } + return r +} + +func resourceIamBindingCreate(newUpdaterFunc newResourceIamUpdaterFunc) schema.CreateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + p := getResourceIamBinding(d) + err = iamPolicyReadModifyWrite(updater, func(ep *cloudresourcemanager.Policy) error { + // Creating a binding does not remove existing members if they are not in the provided members list. + // This prevents removing existing permission without the user's knowledge. + // Instead, a diff is shown in that case after creation. Subsequent calls to update will remove any + // existing members not present in the provided list. 
+ ep.Bindings = mergeBindings(append(ep.Bindings, p)) + return nil + }) + if err != nil { + return err + } + d.SetId(updater.GetResourceId() + "/" + p.Role) + return resourceIamBindingRead(newUpdaterFunc)(d, meta) + } +} + +func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + eBinding := getResourceIamBinding(d) + p, err := updater.GetResourceIamPolicy() + if err != nil { + if isGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG]: Binding for role %q not found for non-existant resource %s, removing from state file.", updater.DescribeResource(), eBinding.Role) + d.SetId("") + return nil + } + + return err + } + log.Printf("[DEBUG]: Retrieved policy for %s: %+v", updater.DescribeResource(), p) + + var binding *cloudresourcemanager.Binding + for _, b := range p.Bindings { + if b.Role != eBinding.Role { + continue + } + binding = b + break + } + if binding == nil { + log.Printf("[DEBUG]: Binding for role %q not found in policy for %s, removing from state file.", eBinding.Role, updater.DescribeResource()) + d.SetId("") + return nil + } + d.Set("etag", p.Etag) + d.Set("members", binding.Members) + d.Set("role", binding.Role) + return nil + } +} + +func iamBindingImport(resourceIdParser resourceIdParserFunc) schema.StateFunc { + return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + if resourceIdParser == nil { + return nil, errors.New("Import not supported for this IAM resource.") + } + config := m.(*Config) + s := strings.Fields(d.Id()) + if len(s) != 2 { + d.SetId("") + return nil, fmt.Errorf("Wrong number of parts to Binding id %s; expected 'resource_name role'.", s) + } + id, role := s[0], s[1] + + // Set the ID only to the first part so all IAM types can share the same resourceIdParserFunc. 
+ d.SetId(id) + d.Set("role", role) + err := resourceIdParser(d, config) + if err != nil { + return nil, err + } + + // Set the ID again so that the ID matches the ID it would have if it had been created via TF. + // Use the current ID in case it changed in the resourceIdParserFunc. + d.SetId(d.Id() + "/" + role) + // It is possible to return multiple bindings, since we can learn about all the bindings + // for this resource here. Unfortunately, `terraform import` has some messy behavior here - + // there's no way to know at this point which resource is being imported, so it's not possible + // to order this list in a useful way. In the event of a complex set of bindings, the user + // will have a terribly confusing set of imported resources and no way to know what matches + // up to what. And since the only users who will do a terraform import on their IAM bindings + // are users who aren't too familiar with Google Cloud IAM (because a "create" for bindings or + // members is idempotent), it's reasonable to expect that the user will be very alarmed by the + // plan that terraform will output which mentions destroying a dozen-plus IAM bindings. With + // that in mind, we return only the binding that matters. 
+ return []*schema.ResourceData{d}, nil + } +} + +func resourceIamBindingUpdate(newUpdaterFunc newResourceIamUpdaterFunc) schema.UpdateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + binding := getResourceIamBinding(d) + err = iamPolicyReadModifyWrite(updater, func(p *cloudresourcemanager.Policy) error { + var found bool + for pos, b := range p.Bindings { + if b.Role != binding.Role { + continue + } + found = true + p.Bindings[pos] = binding + break + } + if !found { + p.Bindings = append(p.Bindings, binding) + } + return nil + }) + if err != nil { + return err + } + + return resourceIamBindingRead(newUpdaterFunc)(d, meta) + } +} + +func resourceIamBindingDelete(newUpdaterFunc newResourceIamUpdaterFunc) schema.DeleteFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + binding := getResourceIamBinding(d) + err = iamPolicyReadModifyWrite(updater, func(p *cloudresourcemanager.Policy) error { + toRemove := -1 + for pos, b := range p.Bindings { + if b.Role != binding.Role { + continue + } + toRemove = pos + break + } + if toRemove < 0 { + log.Printf("[DEBUG]: Policy bindings for %s did not include a binding for role %q", updater.DescribeResource(), binding.Role) + return nil + } + + p.Bindings = append(p.Bindings[:toRemove], p.Bindings[toRemove+1:]...) 
+ return nil + }) + if err != nil { + if isGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG]: Resource %s is missing or deleted, marking policy binding as deleted", updater.DescribeResource()) + return nil + } + return err + } + + return resourceIamBindingRead(newUpdaterFunc)(d, meta) + } +} + +func getResourceIamBinding(d *schema.ResourceData) *cloudresourcemanager.Binding { + members := d.Get("members").(*schema.Set).List() + return &cloudresourcemanager.Binding{ + Members: convertStringArr(members), + Role: d.Get("role").(string), + } +} diff --git a/provider/terraform/resources/resource_iam_member.go b/provider/terraform/resources/resource_iam_member.go new file mode 100644 index 000000000000..e4b66e1f150d --- /dev/null +++ b/provider/terraform/resources/resource_iam_member.go @@ -0,0 +1,212 @@ +package google + +import ( + "errors" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamMemberBaseSchema = map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "member": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, +} + +func iamMemberImport(resourceIdParser resourceIdParserFunc) schema.StateFunc { + return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + if resourceIdParser == nil { + return nil, errors.New("Import not supported for this IAM resource.") + } + config := m.(*Config) + s := strings.Fields(d.Id()) + if len(s) != 3 { + d.SetId("") + return nil, fmt.Errorf("Wrong number of parts to Member id %s; expected 'resource_name role username'.", s) + } + id, role, member := s[0], s[1], s[2] + + // Set the ID only to the first part so all IAM types can share the same resourceIdParserFunc. 
+ d.SetId(id) + d.Set("role", role) + d.Set("member", member) + err := resourceIdParser(d, config) + if err != nil { + return nil, err + } + + // Set the ID again so that the ID matches the ID it would have if it had been created via TF. + // Use the current ID in case it changed in the resourceIdParserFunc. + d.SetId(d.Id() + "/" + role + "/" + member) + return []*schema.ResourceData{d}, nil + } +} + +func ResourceIamMember(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc) *schema.Resource { + return &schema.Resource{ + Create: resourceIamMemberCreate(newUpdaterFunc), + Read: resourceIamMemberRead(newUpdaterFunc), + Delete: resourceIamMemberDelete(newUpdaterFunc), + + Schema: mergeSchemas(IamMemberBaseSchema, parentSpecificSchema), + } +} + +func ResourceIamMemberWithImport(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) *schema.Resource { + r := ResourceIamMember(parentSpecificSchema, newUpdaterFunc) + r.Importer = &schema.ResourceImporter{ + State: iamMemberImport(resourceIdParser), + } + return r +} + +func getResourceIamMember(d *schema.ResourceData) *cloudresourcemanager.Binding { + return &cloudresourcemanager.Binding{ + Members: []string{d.Get("member").(string)}, + Role: d.Get("role").(string), + } +} + +func resourceIamMemberCreate(newUpdaterFunc newResourceIamUpdaterFunc) schema.CreateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + p := getResourceIamMember(d) + err = iamPolicyReadModifyWrite(updater, func(ep *cloudresourcemanager.Policy) error { + // Merge the bindings together + ep.Bindings = mergeBindings(append(ep.Bindings, p)) + return nil + }) + if err != nil { + return err + } + d.SetId(updater.GetResourceId() + "/" + p.Role + "/" + p.Members[0]) + return resourceIamMemberRead(newUpdaterFunc)(d, 
meta) + } +} + +func resourceIamMemberRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + eMember := getResourceIamMember(d) + p, err := updater.GetResourceIamPolicy() + if err != nil { + if isGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG]: Binding of member %q with role %q does not exist for non-existant resource %s, removing from state.", eMember.Members[0], eMember.Role, updater.DescribeResource()) + d.SetId("") + return nil + } + return err + } + log.Printf("[DEBUG]: Retrieved policy for %s: %+v\n", updater.DescribeResource(), p) + + var binding *cloudresourcemanager.Binding + for _, b := range p.Bindings { + if b.Role != eMember.Role { + continue + } + binding = b + break + } + if binding == nil { + log.Printf("[DEBUG]: Binding for role %q does not exist in policy of %s, removing member %q from state.", eMember.Role, updater.DescribeResource(), eMember.Members[0]) + d.SetId("") + return nil + } + var member string + for _, m := range binding.Members { + if m == eMember.Members[0] { + member = m + } + } + if member == "" { + log.Printf("[DEBUG]: Member %q for binding for role %q does not exist in policy of %s, removing from state.", eMember.Members[0], eMember.Role, updater.DescribeResource()) + d.SetId("") + return nil + } + d.Set("etag", p.Etag) + d.Set("member", member) + d.Set("role", binding.Role) + return nil + } +} + +func resourceIamMemberDelete(newUpdaterFunc newResourceIamUpdaterFunc) schema.DeleteFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + member := getResourceIamMember(d) + err = iamPolicyReadModifyWrite(updater, func(p *cloudresourcemanager.Policy) error { + bindingToRemove := -1 + for pos, b := range p.Bindings { + if 
b.Role != member.Role { + continue + } + bindingToRemove = pos + break + } + if bindingToRemove < 0 { + log.Printf("[DEBUG]: Binding for role %q does not exist in policy of project %q, so member %q can't be on it.", member.Role, updater.GetResourceId(), member.Members[0]) + return nil + } + binding := p.Bindings[bindingToRemove] + memberToRemove := -1 + for pos, m := range binding.Members { + if m != member.Members[0] { + continue + } + memberToRemove = pos + break + } + if memberToRemove < 0 { + log.Printf("[DEBUG]: Member %q for binding for role %q does not exist in policy of project %q.", member.Members[0], member.Role, updater.GetResourceId()) + return nil + } + binding.Members = append(binding.Members[:memberToRemove], binding.Members[memberToRemove+1:]...) + if len(binding.Members) == 0 { + // If there is no member left for the role, remove the binding altogether + p.Bindings = append(p.Bindings[:bindingToRemove], p.Bindings[bindingToRemove+1:]...) + } else { + p.Bindings[bindingToRemove] = binding + } + + return nil + }) + if err != nil { + if isGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG]: Member %q for binding for role %q does not exist for non-existant resource %q.", member.Members[0], member.Role, updater.GetResourceId()) + return nil + } + return err + } + + return resourceIamMemberRead(newUpdaterFunc)(d, meta) + } +} diff --git a/provider/terraform/resources/resource_iam_policy.go b/provider/terraform/resources/resource_iam_policy.go new file mode 100644 index 000000000000..f366804dd3f5 --- /dev/null +++ b/provider/terraform/resources/resource_iam_policy.go @@ -0,0 +1,171 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" + + "encoding/json" + "errors" + "fmt" + "google.golang.org/api/cloudresourcemanager/v1" + "log" +) + +var IamPolicyBaseSchema = map[string]*schema.Schema{ + "policy_data": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: jsonPolicyDiffSuppress, + ValidateFunc: 
validateIamPolicy, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + }, +} + +func iamPolicyImport(resourceIdParser resourceIdParserFunc) schema.StateFunc { + return func(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + if resourceIdParser == nil { + return nil, errors.New("Import not supported for this IAM resource.") + } + config := m.(*Config) + err := resourceIdParser(d, config) + if err != nil { + return nil, err + } + return []*schema.ResourceData{d}, nil + } +} + +func ResourceIamPolicy(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc) *schema.Resource { + return &schema.Resource{ + Create: ResourceIamPolicyCreate(newUpdaterFunc), + Read: ResourceIamPolicyRead(newUpdaterFunc), + Update: ResourceIamPolicyUpdate(newUpdaterFunc), + Delete: ResourceIamPolicyDelete(newUpdaterFunc), + + Schema: mergeSchemas(IamPolicyBaseSchema, parentSpecificSchema), + } +} + +func ResourceIamPolicyWithImport(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) *schema.Resource { + r := ResourceIamPolicy(parentSpecificSchema, newUpdaterFunc) + r.Importer = &schema.ResourceImporter{ + State: iamPolicyImport(resourceIdParser), + } + return r +} + +func ResourceIamPolicyCreate(newUpdaterFunc newResourceIamUpdaterFunc) schema.CreateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + if err := setIamPolicyData(d, updater); err != nil { + return err + } + + d.SetId(updater.GetResourceId()) + return ResourceIamPolicyRead(newUpdaterFunc)(d, meta) + } +} + +func ResourceIamPolicyRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } 
+ + policy, err := updater.GetResourceIamPolicy() + if err != nil { + if isGoogleApiErrorWithCode(err, 404) { + log.Printf("[DEBUG]: Policy does not exist for non-existant resource %q", updater.GetResourceId()) + return nil + } + return err + } + + d.Set("etag", policy.Etag) + d.Set("policy_data", marshalIamPolicy(policy)) + + return nil + } +} + +func ResourceIamPolicyUpdate(newUpdaterFunc newResourceIamUpdaterFunc) schema.UpdateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + if d.HasChange("policy_data") { + if err := setIamPolicyData(d, updater); err != nil { + return err + } + } + + return ResourceIamPolicyRead(newUpdaterFunc)(d, meta) + } +} + +func ResourceIamPolicyDelete(newUpdaterFunc newResourceIamUpdaterFunc) schema.DeleteFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + // Set an empty policy to delete the attached policy. 
+ err = updater.SetResourceIamPolicy(&cloudresourcemanager.Policy{}) + if err != nil { + return err + } + + return nil + } +} + +func setIamPolicyData(d *schema.ResourceData, updater ResourceIamUpdater) error { + policy, err := unmarshalIamPolicy(d.Get("policy_data").(string)) + if err != nil { + return fmt.Errorf("'policy_data' is not valid for %s: %s", updater.DescribeResource(), err) + } + + err = updater.SetResourceIamPolicy(policy) + if err != nil { + return err + } + + return nil +} + +func marshalIamPolicy(policy *cloudresourcemanager.Policy) string { + pdBytes, _ := json.Marshal(&cloudresourcemanager.Policy{ + Bindings: policy.Bindings, + }) + return string(pdBytes) +} + +func unmarshalIamPolicy(policyData string) (*cloudresourcemanager.Policy, error) { + policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(policyData), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal policy data %s:\n%s", policyData, err) + } + return policy, nil +} + +func validateIamPolicy(i interface{}, k string) (s []string, es []error) { + _, err := unmarshalIamPolicy(i.(string)) + if err != nil { + es = append(es, err) + } + return +} diff --git a/provider/terraform/resources/resource_kms_crypto_key.go b/provider/terraform/resources/resource_kms_crypto_key.go new file mode 100644 index 000000000000..abc8e676b068 --- /dev/null +++ b/provider/terraform/resources/resource_kms_crypto_key.go @@ -0,0 +1,313 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudkms/v1" +) + +func resourceKmsCryptoKey() *schema.Resource { + return &schema.Resource{ + Create: resourceKmsCryptoKeyCreate, + Read: resourceKmsCryptoKeyRead, + Update: resourceKmsCryptoKeyUpdate, + Delete: resourceKmsCryptoKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": 
&schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "key_ring": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, + }, + "rotation_period": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateKmsCryptoKeyRotationPeriod, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func kmsCryptoKeyRingsEquivalent(k, old, new string, d *schema.ResourceData) bool { + keyRingIdWithSpecifiersRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-])+/keyRings/([a-zA-Z0-9_-]{1,63})$") + normalizedKeyRingIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + if matches := keyRingIdWithSpecifiersRegex.FindStringSubmatch(new); matches != nil { + normMatches := normalizedKeyRingIdRegex.FindStringSubmatch(old) + return normMatches != nil && normMatches[1] == matches[1] && normMatches[2] == matches[2] && normMatches[3] == matches[3] + } + return false +} + +type kmsCryptoKeyId struct { + KeyRingId kmsKeyRingId + Name string +} + +func (s *kmsCryptoKeyId) cryptoKeyId() string { + return fmt.Sprintf("%s/cryptoKeys/%s", s.KeyRingId.keyRingId(), s.Name) +} + +func (s *kmsCryptoKeyId) parentId() string { + return s.KeyRingId.keyRingId() +} + +func (s *kmsCryptoKeyId) terraformId() string { + return fmt.Sprintf("%s/%s", s.KeyRingId.terraformId(), s.Name) +} + +func resourceKmsCryptoKeyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + keyRingId, err := parseKmsKeyRingId(d.Get("key_ring").(string), config) + + if err != nil { + return err + } + + cryptoKeyId := &kmsCryptoKeyId{ + KeyRingId: *keyRingId, + Name: d.Get("name").(string), + } + + key := cloudkms.CryptoKey{Purpose: "ENCRYPT_DECRYPT"} + + if d.Get("rotation_period") != "" { + rotationPeriod := d.Get("rotation_period").(string) + nextRotation, 
err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) + + if err != nil { + return fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) + } + + key.NextRotationTime = nextRotation + key.RotationPeriod = rotationPeriod + } + + cryptoKey, err := config.clientKms.Projects.Locations.KeyRings.CryptoKeys.Create(cryptoKeyId.KeyRingId.keyRingId(), &key).CryptoKeyId(cryptoKeyId.Name).Do() + + if err != nil { + return fmt.Errorf("Error creating CryptoKey: %s", err.Error()) + } + + log.Printf("[DEBUG] Created CryptoKey %s", cryptoKey.Name) + + d.SetId(cryptoKeyId.cryptoKeyId()) + + return resourceKmsCryptoKeyRead(d, meta) +} + +func resourceKmsCryptoKeyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return err + } + + key := cloudkms.CryptoKey{} + + if d.HasChange("rotation_period") && d.Get("rotation_period") != "" { + rotationPeriod := d.Get("rotation_period").(string) + nextRotation, err := kmsCryptoKeyNextRotation(time.Now(), rotationPeriod) + + if err != nil { + return fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) + } + + key.NextRotationTime = nextRotation + key.RotationPeriod = rotationPeriod + } + + cryptoKey, err := config.clientKms.Projects.Locations.KeyRings.CryptoKeys.Patch(cryptoKeyId.cryptoKeyId(), &key).UpdateMask("rotation_period,next_rotation_time").Do() + + if err != nil { + return fmt.Errorf("Error updating CryptoKey: %s", err.Error()) + } + + log.Printf("[DEBUG] Updated CryptoKey %s", cryptoKey.Name) + + d.SetId(cryptoKeyId.cryptoKeyId()) + + return resourceKmsCryptoKeyRead(d, meta) +} + +func resourceKmsCryptoKeyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Executing read for KMS CryptoKey %s", cryptoKeyId.cryptoKeyId()) + + cryptoKey, err 
:= config.clientKms.Projects.Locations.KeyRings.CryptoKeys.Get(cryptoKeyId.cryptoKeyId()).Do() + if err != nil { + return fmt.Errorf("Error reading CryptoKey: %s", err) + } + d.Set("key_ring", cryptoKeyId.KeyRingId.terraformId()) + d.Set("name", cryptoKeyId.Name) + d.Set("rotation_period", cryptoKey.RotationPeriod) + d.Set("self_link", cryptoKey.Name) + + d.SetId(cryptoKeyId.cryptoKeyId()) + + return nil +} + +func clearCryptoKeyVersions(cryptoKeyId *kmsCryptoKeyId, config *Config) error { + versionsClient := config.clientKms.Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions + + versionsResponse, err := versionsClient.List(cryptoKeyId.cryptoKeyId()).Do() + + if err != nil { + return err + } + + for _, version := range versionsResponse.CryptoKeyVersions { + request := &cloudkms.DestroyCryptoKeyVersionRequest{} + _, err = versionsClient.Destroy(version.Name, request).Do() + + if err != nil { + return err + } + } + + return nil +} + +/* + Because KMS CryptoKey resources cannot be deleted on GCP, we are only going to remove it from state + and destroy all its versions, rendering the key useless for encryption and decryption of data. + Re-creation of this resource through Terraform will produce an error. +*/ + +func resourceKmsCryptoKeyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return err + } + + log.Printf(` +[WARNING] KMS CryptoKey resources cannot be deleted from GCP. 
The CryptoKey %s will be removed from Terraform state, +and all its CryptoKeyVersions will be destroyed, but it will still be present on the server.`, cryptoKeyId.cryptoKeyId()) + + err = clearCryptoKeyVersions(cryptoKeyId, config) + + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func validateKmsCryptoKeyRotationPeriod(value interface{}, _ string) (ws []string, errors []error) { + period := value.(string) + pattern := regexp.MustCompile("^([0-9.]*\\d)s$") + match := pattern.FindStringSubmatch(period) + + if len(match) == 0 { + errors = append(errors, fmt.Errorf("Invalid rotation period format: %s", period)) + // Cannot continue to validate because we cannot extract a number. + return + } + + number := match[1] + seconds, err := strconv.ParseFloat(number, 64) + + if err != nil { + errors = append(errors, err) + } else { + if seconds < 86400.0 { + errors = append(errors, fmt.Errorf("Rotation period must be greater than one day")) + } + + parts := strings.Split(number, ".") + + if len(parts) > 1 && len(parts[1]) > 9 { + errors = append(errors, fmt.Errorf("Rotation period cannot have more than 9 fractional digits")) + } + } + + return +} + +func kmsCryptoKeyNextRotation(now time.Time, period string) (result string, err error) { + var duration time.Duration + + duration, err = time.ParseDuration(period) + + if err == nil { + result = now.UTC().Add(duration).Format(time.RFC3339Nano) + } + + return +} + +func parseKmsCryptoKeyId(id string, config *Config) (*kmsCryptoKeyId, error) { + parts := strings.Split(id, "/") + + cryptoKeyIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") + cryptoKeyIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") + cryptoKeyRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})/cryptoKeys/([a-zA-Z0-9_-]{1,63})$") + + if 
cryptoKeyIdRegex.MatchString(id) { + return &kmsCryptoKeyId{ + KeyRingId: kmsKeyRingId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, + Name: parts[3], + }, nil + } + + if cryptoKeyIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}/{cryptoKeyName}` id format.") + } + + return &kmsCryptoKeyId{ + KeyRingId: kmsKeyRingId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, + Name: parts[2], + }, nil + } + + if parts := cryptoKeyRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &kmsCryptoKeyId{ + KeyRingId: kmsKeyRingId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, + Name: parts[4], + }, nil + } + return nil, fmt.Errorf("Invalid CryptoKey id format, expecting `{projectId}/{locationId}/{KeyringName}/{cryptoKeyName}` or `{locationId}/{keyRingName}/{cryptoKeyName}.`") +} diff --git a/provider/terraform/resources/resource_kms_key_ring.go b/provider/terraform/resources/resource_kms_key_ring.go new file mode 100644 index 000000000000..1d86b4de2239 --- /dev/null +++ b/provider/terraform/resources/resource_kms_key_ring.go @@ -0,0 +1,206 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudkms/v1" +) + +func resourceKmsKeyRing() *schema.Resource { + return &schema.Resource{ + Create: resourceKmsKeyRingCreate, + Read: resourceKmsKeyRingRead, + Delete: resourceKmsKeyRingDelete, + Importer: &schema.ResourceImporter{ + State: resourceKmsKeyRingImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + 
Computed: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +type kmsKeyRingId struct { + Project string + Location string + Name string +} + +func (s *kmsKeyRingId) keyRingId() string { + return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", s.Project, s.Location, s.Name) +} + +func (s *kmsKeyRingId) parentId() string { + return fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Location) +} + +func (s *kmsKeyRingId) terraformId() string { + return fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) +} + +func resourceKmsKeyRingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + keyRingId := &kmsKeyRingId{ + Project: project, + Location: d.Get("location").(string), + Name: d.Get("name").(string), + } + + // This resource is often created just after a project, and requires + // billing support, which is eventually consistent. We attempt to + // wait on billing support in the project resource, but we can't + // always get it right - this retry fixes a lot of flaky tests we were + // noticing. 
+ err = retryTimeDuration(func() error { + keyRing, err := config.clientKms.Projects.Locations.KeyRings.Create(keyRingId.parentId(), &cloudkms.KeyRing{}).KeyRingId(keyRingId.Name).Do() + + if err != nil { + return fmt.Errorf("Error creating KeyRing: %s", err) + } + + log.Printf("[DEBUG] Created KeyRing %s", keyRing.Name) + + d.SetId(keyRingId.keyRingId()) + return nil + }, time.Duration(30*time.Second)) + if err != nil { + return err + } + + return resourceKmsKeyRingRead(d, meta) +} + +func resourceKmsKeyRingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + keyRingId, err := parseKmsKeyRingId(d.Id(), config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Executing read for KMS KeyRing %s", keyRingId.keyRingId()) + + keyRing, err := config.clientKms.Projects.Locations.KeyRings.Get(keyRingId.keyRingId()).Do() + + if err != nil { + return fmt.Errorf("Error reading KeyRing: %s", err) + } + + d.Set("project", project) + d.Set("self_link", keyRing.Name) + + return nil +} + +/* + Because KMS KeyRing resources cannot be deleted on GCP, we are only going to remove it from state. + Re-creation of this resource through Terraform will produce an error. +*/ + +func resourceKmsKeyRingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + keyRingId, err := parseKmsKeyRingId(d.Id(), config) + if err != nil { + return err + } + + log.Printf("[WARNING] KMS KeyRing resources cannot be deleted from GCP. 
This KeyRing %s will be removed from Terraform state, but will still be present on the server.", keyRingId.keyRingId()) + + d.SetId("") + + return nil +} + +func parseKmsKeyRingId(id string, config *Config) (*kmsKeyRingId, error) { + parts := strings.Split(id, "/") + + keyRingIdRegex := regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + keyRingIdWithoutProjectRegex := regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") + keyRingRelativeLinkRegex := regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})$") + + if keyRingIdRegex.MatchString(id) { + return &kmsKeyRingId{ + Project: parts[0], + Location: parts[1], + Name: parts[2], + }, nil + } + + if keyRingIdWithoutProjectRegex.MatchString(id) { + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}` id format.") + } + + return &kmsKeyRingId{ + Project: config.Project, + Location: parts[0], + Name: parts[1], + }, nil + } + + if parts := keyRingRelativeLinkRegex.FindStringSubmatch(id); parts != nil { + return &kmsKeyRingId{ + Project: parts[1], + Location: parts[2], + Name: parts[3], + }, nil + } + return nil, fmt.Errorf("Invalid KeyRing id format, expecting `{projectId}/{locationId}/{keyRingName}` or `{locationId}/{keyRingName}.`") +} + +func resourceKmsKeyRingImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + keyRingId, err := parseKmsKeyRingId(d.Id(), config) + if err != nil { + return nil, err + } + + d.Set("name", keyRingId.Name) + d.Set("location", keyRingId.Location) + + if config.Project != keyRingId.Project { + d.Set("project", keyRingId.Project) + } + + d.SetId(keyRingId.keyRingId()) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_logging_billing_account_sink.go 
b/provider/terraform/resources/resource_logging_billing_account_sink.go new file mode 100644 index 000000000000..ecde951ef112 --- /dev/null +++ b/provider/terraform/resources/resource_logging_billing_account_sink.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceLoggingBillingAccountSink() *schema.Resource { + schm := &schema.Resource{ + Create: resourceLoggingBillingAccountSinkCreate, + Read: resourceLoggingBillingAccountSinkRead, + Delete: resourceLoggingBillingAccountSinkDelete, + Update: resourceLoggingBillingAccountSinkUpdate, + Schema: resourceLoggingSinkSchema(), + Importer: &schema.ResourceImporter{ + State: resourceLoggingSinkImportState("billing_account"), + }, + } + schm.Schema["billing_account"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + } + return schm +} + +func resourceLoggingBillingAccountSinkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, sink := expandResourceLoggingSink(d, "billingAccounts", d.Get("billing_account").(string)) + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. 
+ _, err := config.clientLogging.BillingAccounts.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + return resourceLoggingBillingAccountSinkRead(d, meta) +} + +func resourceLoggingBillingAccountSinkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink, err := config.clientLogging.BillingAccounts.Sinks.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Billing Logging Sink %s", d.Get("name").(string))) + } + + flattenResourceLoggingSink(d, sink) + return nil + +} + +func resourceLoggingBillingAccountSinkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink := expandResourceLoggingSinkForUpdate(d) + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. + _, err := config.clientLogging.BillingAccounts.Sinks.Patch(d.Id(), sink). + UpdateMask(defaultLogSinkUpdateMask).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + return resourceLoggingBillingAccountSinkRead(d, meta) +} + +func resourceLoggingBillingAccountSinkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientLogging.Projects.Sinks.Delete(d.Id()).Do() + if err != nil { + return err + } + + return nil +} diff --git a/provider/terraform/resources/resource_logging_exclusion.go b/provider/terraform/resources/resource_logging_exclusion.go new file mode 100644 index 000000000000..6298b1b5237c --- /dev/null +++ b/provider/terraform/resources/resource_logging_exclusion.go @@ -0,0 +1,265 @@ +package google + +import ( + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/logging/v2" +) + +var LoggingExclusionBaseSchema = map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, 
+ ForceNew: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + "disabled": { + Type: schema.TypeBool, + Optional: true, + }, +} + +func ResourceLoggingExclusion(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceLoggingExclusionUpdaterFunc, resourceIdParser resourceIdParserFunc) *schema.Resource { + return &schema.Resource{ + Create: resourceLoggingExclusionCreate(newUpdaterFunc), + Read: resourceLoggingExclusionRead(newUpdaterFunc), + Update: resourceLoggingExclusionUpdate(newUpdaterFunc), + Delete: resourceLoggingExclusionDelete(newUpdaterFunc), + + Importer: &schema.ResourceImporter{ + State: resourceLoggingExclusionImportState(resourceIdParser), + }, + + Schema: mergeSchemas(LoggingExclusionBaseSchema, parentSpecificSchema), + } +} + +func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.CreateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + id, exclusion := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) + + err = updater.CreateLoggingExclusion(id.parent(), exclusion) + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + + return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) + } +} + +func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.ReadFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + exclusion, err := updater.ReadLoggingExclusion(d.Id()) + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Logging Exclusion %s", d.Get("name").(string))) + } + + flattenResourceLoggingExclusion(d, exclusion) + + if updater.GetResourceType() == "projects" { + d.Set("project", updater.GetResourceId()) + 
} + + return nil + } +} + +func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.UpdateFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + exclusion, updateMask := expandResourceLoggingExclusionForUpdate(d) + + err = updater.UpdateLoggingExclusion(d.Id(), exclusion, updateMask) + if err != nil { + return err + } + + return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) + } +} + +func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) schema.DeleteFunc { + return func(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + updater, err := newUpdaterFunc(d, config) + if err != nil { + return err + } + + err = updater.DeleteLoggingExclusion(d.Id()) + if err != nil { + return err + } + + d.SetId("") + return nil + } +} + +func resourceLoggingExclusionImportState(resourceIdParser resourceIdParserFunc) schema.StateFunc { + return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + err := resourceIdParser(d, config) + if err != nil { + return nil, err + } + return []*schema.ResourceData{d}, nil + } +} + +func expandResourceLoggingExclusion(d *schema.ResourceData, resourceType, resourceId string) (LoggingExclusionId, *logging.LogExclusion) { + id := LoggingExclusionId{ + resourceType: resourceType, + resourceId: resourceId, + name: d.Get("name").(string), + } + + exclusion := logging.LogExclusion{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Filter: d.Get("filter").(string), + Disabled: d.Get("disabled").(bool), + } + return id, &exclusion +} + +func flattenResourceLoggingExclusion(d *schema.ResourceData, exclusion *logging.LogExclusion) { + d.Set("name", exclusion.Name) + d.Set("description", exclusion.Description) + d.Set("filter", 
exclusion.Filter) + d.Set("disabled", exclusion.Disabled) +} + +func expandResourceLoggingExclusionForUpdate(d *schema.ResourceData) (*logging.LogExclusion, string) { + // Can update description/filter/disabled right now. + exclusion := logging.LogExclusion{} + + var updateMaskArr []string + + if d.HasChange("description") { + exclusion.Description = d.Get("description").(string) + exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Description") + updateMaskArr = append(updateMaskArr, "description") + } + + if d.HasChange("filter") { + exclusion.Filter = d.Get("filter").(string) + exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Filter") + updateMaskArr = append(updateMaskArr, "filter") + } + + if d.HasChange("disabled") { + exclusion.Disabled = d.Get("disabled").(bool) + exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Disabled") + updateMaskArr = append(updateMaskArr, "disabled") + } + + updateMask := strings.Join(updateMaskArr, ",") + return &exclusion, updateMask +} + +// The ResourceLoggingExclusionUpdater interface is implemented for each GCP +// resource supporting log exclusions. +// +// Implementations should keep track of the resource identifier. +type ResourceLoggingExclusionUpdater interface { + CreateLoggingExclusion(parent string, exclusion *logging.LogExclusion) error + ReadLoggingExclusion(id string) (*logging.LogExclusion, error) + UpdateLoggingExclusion(id string, exclusion *logging.LogExclusion, updateMask string) error + DeleteLoggingExclusion(id string) error + + GetResourceType() string + + // Returns the unique resource identifier. + GetResourceId() string + + // Textual description of this resource to be used in error message. + // The description should include the unique resource identifier. 
+ DescribeResource() string +} + +type newResourceLoggingExclusionUpdaterFunc func(d *schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) + +// loggingExclusionResourceTypes contains all the possible Stackdriver Logging resource types. Used to parse ids safely. +var loggingExclusionResourceTypes = []string{ + "billingAccounts", + "folders", + "organizations", + "projects", +} + +// LoggingExclusionId represents the parts that make up the canonical id used within terraform for a logging resource. +type LoggingExclusionId struct { + resourceType string + resourceId string + name string +} + +// loggingExclusionIdRegex matches valid logging exclusion canonical ids +var loggingExclusionIdRegex = regexp.MustCompile("(.+)/(.+)/exclusions/(.+)") + +// canonicalId returns the LoggingExclusionId as the canonical id used within terraform. +func (l LoggingExclusionId) canonicalId() string { + return fmt.Sprintf("%s/%s/exclusions/%s", l.resourceType, l.resourceId, l.name) +} + +// parent returns the "parent-level" resource that the exclusion is in (e.g. `folders/foo` for id `folders/foo/exclusions/bar`) +func (l LoggingExclusionId) parent() string { + return fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) +} + +// parseLoggingExclusionId parses a canonical id into a LoggingExclusionId, or returns an error on failure. +func parseLoggingExclusionId(id string) (*LoggingExclusionId, error) { + parts := loggingExclusionIdRegex.FindStringSubmatch(id) + if parts == nil { + return nil, fmt.Errorf("unable to parse logging exclusion id %#v", id) + } + // If our resourceType is not a valid logging exclusion resource type, complain loudly + validLoggingExclusionResourceType := false + for _, v := range loggingExclusionResourceTypes { + if v == parts[1] { + validLoggingExclusionResourceType = true + break + } + } + + if !validLoggingExclusionResourceType { + return nil, fmt.Errorf("Logging resource type %s is not valid. 
Valid resource types: %#v", parts[1], + loggingExclusionResourceTypes) + } + return &LoggingExclusionId{ + resourceType: parts[1], + resourceId: parts[2], + name: parts[3], + }, nil +} diff --git a/provider/terraform/resources/resource_logging_folder_sink.go b/provider/terraform/resources/resource_logging_folder_sink.go new file mode 100644 index 000000000000..a4ecd3b450b0 --- /dev/null +++ b/provider/terraform/resources/resource_logging_folder_sink.go @@ -0,0 +1,98 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceLoggingFolderSink() *schema.Resource { + schm := &schema.Resource{ + Create: resourceLoggingFolderSinkCreate, + Read: resourceLoggingFolderSinkRead, + Delete: resourceLoggingFolderSinkDelete, + Update: resourceLoggingFolderSinkUpdate, + Schema: resourceLoggingSinkSchema(), + Importer: &schema.ResourceImporter{ + State: resourceLoggingSinkImportState("folder"), + }, + } + schm.Schema["folder"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + return strings.Replace(v.(string), "folders/", "", 1) + }, + } + schm.Schema["include_children"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + } + + return schm +} + +func resourceLoggingFolderSinkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + folder := parseFolderId(d.Get("folder")) + id, sink := expandResourceLoggingSink(d, "folders", folder) + sink.IncludeChildren = d.Get("include_children").(bool) + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. 
+ _, err := config.clientLogging.Folders.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + return resourceLoggingFolderSinkRead(d, meta) +} + +func resourceLoggingFolderSinkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink, err := config.clientLogging.Folders.Sinks.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Folder Logging Sink %s", d.Get("name").(string))) + } + + flattenResourceLoggingSink(d, sink) + d.Set("include_children", sink.IncludeChildren) + + return nil +} + +func resourceLoggingFolderSinkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink := expandResourceLoggingSinkForUpdate(d) + // It seems the API might actually accept an update for include_children; this is not in the list of updatable + // properties though and might break in the future. Always include the value to prevent it changing. + sink.IncludeChildren = d.Get("include_children").(bool) + sink.ForceSendFields = append(sink.ForceSendFields, "IncludeChildren") + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. + _, err := config.clientLogging.Folders.Sinks.Patch(d.Id(), sink). 
+ UpdateMask(defaultLogSinkUpdateMask).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + return resourceLoggingFolderSinkRead(d, meta) +} + +func resourceLoggingFolderSinkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientLogging.Projects.Sinks.Delete(d.Id()).Do() + if err != nil { + return err + } + + return nil +} diff --git a/provider/terraform/resources/resource_logging_organization_sink.go b/provider/terraform/resources/resource_logging_organization_sink.go new file mode 100644 index 000000000000..9063345ff029 --- /dev/null +++ b/provider/terraform/resources/resource_logging_organization_sink.go @@ -0,0 +1,98 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceLoggingOrganizationSink() *schema.Resource { + schm := &schema.Resource{ + Create: resourceLoggingOrganizationSinkCreate, + Read: resourceLoggingOrganizationSinkRead, + Delete: resourceLoggingOrganizationSinkDelete, + Update: resourceLoggingOrganizationSinkUpdate, + Schema: resourceLoggingSinkSchema(), + Importer: &schema.ResourceImporter{ + State: resourceLoggingSinkImportState("org_id"), + }, + } + schm.Schema["org_id"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + StateFunc: func(v interface{}) string { + return strings.Replace(v.(string), "organizations/", "", 1) + }, + } + schm.Schema["include_children"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + } + + return schm +} + +func resourceLoggingOrganizationSinkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + org := d.Get("org_id").(string) + id, sink := expandResourceLoggingSink(d, "organizations", org) + sink.IncludeChildren = d.Get("include_children").(bool) + + // Must use a unique writer, since all destinations are in projects. 
+ // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. + _, err := config.clientLogging.Organizations.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + return resourceLoggingOrganizationSinkRead(d, meta) +} + +func resourceLoggingOrganizationSinkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink, err := config.clientLogging.Organizations.Sinks.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Organization Logging Sink %s", d.Get("name").(string))) + } + + flattenResourceLoggingSink(d, sink) + d.Set("include_children", sink.IncludeChildren) + + return nil +} + +func resourceLoggingOrganizationSinkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink := expandResourceLoggingSinkForUpdate(d) + // It seems the API might actually accept an update for include_children; this is not in the list of updatable + // properties though and might break in the future. Always include the value to prevent it changing. + sink.IncludeChildren = d.Get("include_children").(bool) + sink.ForceSendFields = append(sink.ForceSendFields, "IncludeChildren") + + // The API will reject any requests that don't explicitly set 'uniqueWriterIdentity' to true. + _, err := config.clientLogging.Organizations.Sinks.Patch(d.Id(), sink). 
+ UpdateMask(defaultLogSinkUpdateMask).UniqueWriterIdentity(true).Do() + if err != nil { + return err + } + + return resourceLoggingOrganizationSinkRead(d, meta) +} + +func resourceLoggingOrganizationSinkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientLogging.Projects.Sinks.Delete(d.Id()).Do() + if err != nil { + return err + } + + return nil +} diff --git a/provider/terraform/resources/resource_logging_project_sink.go b/provider/terraform/resources/resource_logging_project_sink.go new file mode 100644 index 000000000000..501fb8991359 --- /dev/null +++ b/provider/terraform/resources/resource_logging_project_sink.go @@ -0,0 +1,106 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +const nonUniqueWriterAccount = "serviceAccount:cloud-logs@system.gserviceaccount.com" + +func resourceLoggingProjectSink() *schema.Resource { + schm := &schema.Resource{ + Create: resourceLoggingProjectSinkCreate, + Read: resourceLoggingProjectSinkRead, + Delete: resourceLoggingProjectSinkDelete, + Update: resourceLoggingProjectSinkUpdate, + Schema: resourceLoggingSinkSchema(), + Importer: &schema.ResourceImporter{ + State: resourceLoggingSinkImportState("project"), + }, + } + schm.Schema["project"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + } + schm.Schema["unique_writer_identity"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + } + return schm +} + +func resourceLoggingProjectSinkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + id, sink := expandResourceLoggingSink(d, "projects", project) + uniqueWriterIdentity := d.Get("unique_writer_identity").(bool) + + _, err = config.clientLogging.Projects.Sinks.Create(id.parent(), 
sink).UniqueWriterIdentity(uniqueWriterIdentity).Do() + if err != nil { + return err + } + + d.SetId(id.canonicalId()) + + return resourceLoggingProjectSinkRead(d, meta) +} + +func resourceLoggingProjectSinkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + sink, err := config.clientLogging.Projects.Sinks.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Project Logging Sink %s", d.Get("name").(string))) + } + + d.Set("project", project) + flattenResourceLoggingSink(d, sink) + if sink.WriterIdentity != nonUniqueWriterAccount { + d.Set("unique_writer_identity", true) + } else { + d.Set("unique_writer_identity", false) + } + return nil +} + +func resourceLoggingProjectSinkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sink := expandResourceLoggingSinkForUpdate(d) + uniqueWriterIdentity := d.Get("unique_writer_identity").(bool) + + _, err := config.clientLogging.Projects.Sinks.Patch(d.Id(), sink). 
+ UpdateMask(defaultLogSinkUpdateMask).UniqueWriterIdentity(uniqueWriterIdentity).Do() + if err != nil { + return err + } + + return resourceLoggingProjectSinkRead(d, meta) +} + +func resourceLoggingProjectSinkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientLogging.Projects.Sinks.Delete(d.Id()).Do() + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/provider/terraform/resources/resource_logging_sink.go b/provider/terraform/resources/resource_logging_sink.go new file mode 100644 index 000000000000..45cd2bda7ba7 --- /dev/null +++ b/provider/terraform/resources/resource_logging_sink.go @@ -0,0 +1,87 @@ +package google + +import ( + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/logging/v2" +) + +// Empty update masks will eventually cause updates to fail, currently empty masks default to this string +const defaultLogSinkUpdateMask = "destination,filter,includeChildren" + +func resourceLoggingSinkSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "destination": { + Type: schema.TypeString, + Required: true, + }, + + "filter": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: optionalSurroundingSpacesSuppress, + }, + + "writer_identity": { + Type: schema.TypeString, + Computed: true, + }, + } +} + +func expandResourceLoggingSink(d *schema.ResourceData, resourceType, resourceId string) (LoggingSinkId, *logging.LogSink) { + id := LoggingSinkId{ + resourceType: resourceType, + resourceId: resourceId, + name: d.Get("name").(string), + } + + sink := logging.LogSink{ + Name: d.Get("name").(string), + Destination: d.Get("destination").(string), + Filter: d.Get("filter").(string), + } + return id, &sink +} + +func flattenResourceLoggingSink(d *schema.ResourceData, sink *logging.LogSink) { + d.Set("name", sink.Name) + d.Set("destination", 
sink.Destination) + d.Set("filter", sink.Filter) + d.Set("writer_identity", sink.WriterIdentity) +} + +func expandResourceLoggingSinkForUpdate(d *schema.ResourceData) *logging.LogSink { + // Can only update destination/filter right now. Despite the method below using 'Patch', the API requires both + // destination and filter (even if unchanged). + sink := logging.LogSink{ + Destination: d.Get("destination").(string), + Filter: d.Get("filter").(string), + } + + if d.HasChange("destination") { + sink.ForceSendFields = append(sink.ForceSendFields, "Destination") + } + if d.HasChange("filter") { + sink.ForceSendFields = append(sink.ForceSendFields, "Filter") + } + return &sink +} + +func resourceLoggingSinkImportState(sinkType string) schema.StateFunc { + return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + loggingSinkId, err := parseLoggingSinkId(d.Id()) + if err != nil { + return nil, err + } + + d.Set(sinkType, loggingSinkId.resourceId) + + return []*schema.ResourceData{d}, nil + } +} diff --git a/provider/terraform/resources/resource_pubsub_subscription.go b/provider/terraform/resources/resource_pubsub_subscription.go new file mode 100644 index 000000000000..f4731a3f5e8f --- /dev/null +++ b/provider/terraform/resources/resource_pubsub_subscription.go @@ -0,0 +1,228 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/pubsub/v1" + "regexp" +) + +func resourcePubsubSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubSubscriptionCreate, + Read: resourcePubsubSubscriptionRead, + Update: resourcePubsubSubscriptionUpdate, + Delete: resourcePubsubSubscriptionDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubSubscriptionStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "topic": &schema.Schema{ + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "ack_deadline_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "push_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "push_endpoint": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := getComputedSubscriptionName(project, d.Get("name").(string)) + computed_topic_name := getComputedTopicName(project, d.Get("topic").(string)) + + // process optional parameters + var ackDeadlineSeconds int64 + ackDeadlineSeconds = 10 + if v, ok := d.GetOk("ack_deadline_seconds"); ok { + ackDeadlineSeconds = int64(v.(int)) + } + + subscription := &pubsub.Subscription{ + AckDeadlineSeconds: ackDeadlineSeconds, + Topic: computed_topic_name, + PushConfig: expandPubsubSubscriptionPushConfig(d.Get("push_config").([]interface{})), + } + + call := config.clientPubsub.Projects.Subscriptions.Create(name, subscription) + res, err := call.Do() + if err != nil { + return err + } + + d.SetId(res.Name) + + return resourcePubsubSubscriptionRead(d, meta) +} + +func getComputedTopicName(project, topic string) string { + match, _ := regexp.MatchString("projects\\/.*\\/topics\\/.*", topic) + if match { + return topic + } + return fmt.Sprintf("projects/%s/topics/%s", 
project, topic) +} + +func getComputedSubscriptionName(project, subscription string) string { + match, _ := regexp.MatchString("projects\\/.*\\/subscriptions\\/.*", subscription) + if match { + return subscription + } + return fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +} + +func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Id() + subscription, err := config.clientPubsub.Projects.Subscriptions.Get(name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Subscription %q", name)) + } + + d.Set("name", GetResourceNameFromSelfLink(subscription.Name)) + d.Set("topic", subscription.Topic) + d.Set("ack_deadline_seconds", subscription.AckDeadlineSeconds) + d.Set("path", subscription.Name) + d.Set("push_config", flattenPubsubSubscriptionPushConfig(subscription.PushConfig)) + d.Set("project", project) + + return nil +} + +func resourcePubsubSubscriptionUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("push_config") { + _, err := config.clientPubsub.Projects.Subscriptions.ModifyPushConfig(d.Id(), &pubsub.ModifyPushConfigRequest{ + PushConfig: expandPubsubSubscriptionPushConfig(d.Get("push_config").([]interface{})), + }).Do() + + if err != nil { + return fmt.Errorf("Error updating subscription %q: %s", d.Get("name"), err) + } + } + + d.Partial(false) + + return nil +} + +func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Subscriptions.Delete(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} + +func resourcePubsubSubscriptionStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + project, 
err := getProject(d, config) + if err != nil { + return nil, err + } + + id := fmt.Sprintf("projects/%s/subscriptions/%s", project, d.Id()) + + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenPubsubSubscriptionPushConfig(pushConfig *pubsub.PushConfig) []map[string]interface{} { + configs := make([]map[string]interface{}, 0, 1) + + if pushConfig == nil || len(pushConfig.PushEndpoint) == 0 { + return configs + } + + configs = append(configs, map[string]interface{}{ + "push_endpoint": pushConfig.PushEndpoint, + "attributes": pushConfig.Attributes, + }) + + return configs +} + +func expandPubsubSubscriptionPushConfig(configured []interface{}) *pubsub.PushConfig { + if len(configured) == 0 || configured[0] == nil { + // An empty `pushConfig` indicates that the Pub/Sub system should stop pushing messages + // from the given subscription and allow messages to be pulled and acknowledged. + return &pubsub.PushConfig{} + } + + pushConfig := configured[0].(map[string]interface{}) + return &pubsub.PushConfig{ + PushEndpoint: pushConfig["push_endpoint"].(string), + Attributes: convertStringMap(pushConfig["attributes"].(map[string]interface{})), + } +} diff --git a/provider/terraform/resources/resource_pubsub_topic.go b/provider/terraform/resources/resource_pubsub_topic.go new file mode 100644 index 000000000000..dc62559500ff --- /dev/null +++ b/provider/terraform/resources/resource_pubsub_topic.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + "regexp" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/pubsub/v1" +) + +func resourcePubsubTopic() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubTopicCreate, + Read: resourcePubsubTopicRead, + Delete: resourcePubsubTopicDelete, + + Importer: &schema.ResourceImporter{ + State: resourcePubsubTopicStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
DiffSuppressFunc: linkDiffSuppress, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("name").(string)) + topic := &pubsub.Topic{} + + call := config.clientPubsub.Projects.Topics.Create(name, topic) + res, err := call.Do() + if err != nil { + return err + } + + d.SetId(res.Name) + + return nil +} + +func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Id() + call := config.clientPubsub.Projects.Topics.Get(name) + res, err := call.Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Topic %q", name)) + } + + d.Set("name", GetResourceNameFromSelfLink(res.Name)) + d.Set("project", project) + + return nil +} + +func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Topics.Delete(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} + +func resourcePubsubTopicStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + + topicId := regexp.MustCompile("^projects/[^/]+/topics/[^/]+$") + if topicId.MatchString(d.Id()) { + return []*schema.ResourceData{d}, nil + } + + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") + } + + id := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Id()) + + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git 
a/provider/terraform/resources/resource_runtimeconfig_config.go b/provider/terraform/resources/resource_runtimeconfig_config.go new file mode 100644 index 000000000000..151b6c4c3d5f --- /dev/null +++ b/provider/terraform/resources/resource_runtimeconfig_config.go @@ -0,0 +1,166 @@ +package google + +import ( + "fmt" + "regexp" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/runtimeconfig/v1beta1" +) + +var runtimeConfigFullName *regexp.Regexp = regexp.MustCompile("^projects/([^/]+)/configs/(.+)$") + +func resourceRuntimeconfigConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceRuntimeconfigConfigCreate, + Read: resourceRuntimeconfigConfigRead, + Update: resourceRuntimeconfigConfigUpdate, + Delete: resourceRuntimeconfigConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRuntimeconfigConfigImport, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateGCPName, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceRuntimeconfigConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + fullName := resourceRuntimeconfigFullName(project, name) + runtimeConfig := runtimeconfig.RuntimeConfig{ + Name: fullName, + } + + if val, ok := d.GetOk("description"); ok { + runtimeConfig.Description = val.(string) + } + + _, err = config.clientRuntimeconfig.Projects.Configs.Create("projects/"+project, &runtimeConfig).Do() + + if err != nil { + return err + } + d.SetId(fullName) + + return nil +} + +func resourceRuntimeconfigConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + fullName := d.Id() + 
runConfig, err := config.clientRuntimeconfig.Projects.Configs.Get(fullName).Do() + if err != nil { + return err + } + + project, name, err := resourceRuntimeconfigParseFullName(runConfig.Name) + if err != nil { + return err + } + // Check to see if project matches our current defined value - if it doesn't, we'll explicitly set it + curProject, err := getProject(d, config) + if err != nil { + return err + } + if project != curProject { + d.Set("project", project) + } + + d.Set("name", name) + d.Set("description", runConfig.Description) + d.Set("project", project) + + return nil +} + +func resourceRuntimeconfigConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Update works more like an 'overwrite' method - we build a new runtimeconfig.RuntimeConfig struct and it becomes + // the new config. This means our Update logic looks an awful lot like Create (and hence, doesn't use + // schema.ResourceData.hasChange()). + fullName := d.Id() + runtimeConfig := runtimeconfig.RuntimeConfig{ + Name: fullName, + } + if v, ok := d.GetOk("description"); ok { + runtimeConfig.Description = v.(string) + } + + _, err := config.clientRuntimeconfig.Projects.Configs.Update(fullName, &runtimeConfig).Do() + if err != nil { + return err + } + return nil +} + +func resourceRuntimeconfigConfigDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + fullName := d.Id() + + _, err := config.clientRuntimeconfig.Projects.Configs.Delete(fullName).Do() + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceRuntimeconfigConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/configs/(?P[^/]+)", "(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/configs/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error 
constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +// resourceRuntimeconfigFullName turns a given project and a 'short name' for a runtime config into a full name +// (e.g. projects/my-project/configs/my-config). +func resourceRuntimeconfigFullName(project, name string) string { + return fmt.Sprintf("projects/%s/configs/%s", project, name) +} + +// resourceRuntimeconfigParseFullName parses a full name (e.g. projects/my-project/configs/my-config) by parsing out the +// project and the short name. Returns "", "", nil upon error. +func resourceRuntimeconfigParseFullName(fullName string) (project, name string, err error) { + matches := runtimeConfigFullName.FindStringSubmatch(fullName) + if matches == nil { + return "", "", fmt.Errorf("Given full name doesn't match expected regexp; fullname = '%s'", fullName) + } + return matches[1], matches[2], nil +} diff --git a/provider/terraform/resources/resource_runtimeconfig_variable.go b/provider/terraform/resources/resource_runtimeconfig_variable.go new file mode 100644 index 000000000000..6601faa38001 --- /dev/null +++ b/provider/terraform/resources/resource_runtimeconfig_variable.go @@ -0,0 +1,207 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/runtimeconfig/v1beta1" + "regexp" +) + +func resourceRuntimeconfigVariable() *schema.Resource { + return &schema.Resource{ + Create: resourceRuntimeconfigVariableCreate, + Read: resourceRuntimeconfigVariableRead, + Update: resourceRuntimeconfigVariableUpdate, + Delete: resourceRuntimeconfigVariableDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRuntimeconfigVariableImport, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + 
ForceNew: true, + }, + + "value": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"text"}, + }, + + "text": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"value"}, + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceRuntimeconfigVariableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + variable, parent, err := newRuntimeconfigVariableFromResourceData(d, project) + if err != nil { + return err + } + + createdVariable, err := config.clientRuntimeconfig.Projects.Configs.Variables.Create(resourceRuntimeconfigFullName(project, parent), variable).Do() + if err != nil { + return err + } + d.SetId(createdVariable.Name) + + return setRuntimeConfigVariableToResourceData(d, *createdVariable) +} + +func resourceRuntimeconfigVariableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + fullName := d.Id() + createdVariable, err := config.clientRuntimeconfig.Projects.Configs.Variables.Get(fullName).Do() + if err != nil { + return err + } + + return setRuntimeConfigVariableToResourceData(d, *createdVariable) +} + +func resourceRuntimeconfigVariableUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + // Update works more like an 'overwrite' method - we build a new runtimeconfig.Variable struct and it becomes the + // new config. This means our Update logic looks an awful lot like Create (and hence, doesn't use + // schema.ResourceData.hasChange()). 
+ + variable, _, err := newRuntimeconfigVariableFromResourceData(d, project) + if err != nil { + return err + } + + createdVariable, err := config.clientRuntimeconfig.Projects.Configs.Variables.Update(variable.Name, variable).Do() + if err != nil { + return err + } + + return setRuntimeConfigVariableToResourceData(d, *createdVariable) +} + +func resourceRuntimeconfigVariableDelete(d *schema.ResourceData, meta interface{}) error { + fullName := d.Id() + config := meta.(*Config) + + _, err := config.clientRuntimeconfig.Projects.Configs.Variables.Delete(fullName).Do() + if err != nil { + return err + } + d.SetId("") + + return nil +} + +func resourceRuntimeconfigVariableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/configs/(?P[^/]+)/variables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/configs/{{parent}}/variables/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +// resourceRuntimeconfigVariableFullName turns a given project, runtime config name, and a 'short name' for a runtime +// config variable into a full name (e.g. projects/my-project/configs/my-config/variables/my-variable). +func resourceRuntimeconfigVariableFullName(project, config, name string) string { + return fmt.Sprintf("projects/%s/configs/%s/variables/%s", project, config, name) +} + +// resourceRuntimeconfigVariableParseFullName parses a full name +// (e.g. projects/my-project/configs/my-config/variables/my-variable) by parsing out the +// project, runtime config name, and the short name. Returns "", "", "", err upon error. 
+func resourceRuntimeconfigVariableParseFullName(fullName string) (project, config, name string, err error) { + re := regexp.MustCompile("^projects/([^/]+)/configs/([^/]+)/variables/(.+)$") + matches := re.FindStringSubmatch(fullName) + if matches == nil { + return "", "", "", fmt.Errorf("Given full name doesn't match expected regexp; fullname = '%s'", fullName) + } + return matches[1], matches[2], matches[3], nil +} + +// newRuntimeconfigVariableFromResourceData builds a new runtimeconfig.Variable struct from the data stored in a +// schema.ResourceData. Also returns the full name of the parent. Returns nil, "", err upon error. +func newRuntimeconfigVariableFromResourceData(d *schema.ResourceData, project string) (variable *runtimeconfig.Variable, parent string, err error) { + // Validate that both text and value are not set + text, textSet := d.GetOk("text") + value, valueSet := d.GetOk("value") + + if !textSet && !valueSet { + return nil, "", fmt.Errorf("You must specify one of value or text.") + } + + // TODO(selmanj) here we assume it's a simple name, not a full name. Should probably support full name as well + parent = d.Get("parent").(string) + name := d.Get("name").(string) + + fullName := resourceRuntimeconfigVariableFullName(project, parent, name) + + variable = &runtimeconfig.Variable{ + Name: fullName, + } + + if textSet { + variable.Text = text.(string) + } else { + variable.Value = value.(string) + } + + return variable, parent, nil +} + +// setRuntimeConfigVariableToResourceData stores a provided runtimeconfig.Variable struct inside a schema.ResourceData. 
+func setRuntimeConfigVariableToResourceData(d *schema.ResourceData, variable runtimeconfig.Variable) error { + varProject, parent, name, err := resourceRuntimeconfigVariableParseFullName(variable.Name) + if err != nil { + return err + } + d.Set("name", name) + d.Set("parent", parent) + d.Set("project", varProject) + d.Set("value", variable.Value) + d.Set("text", variable.Text) + d.Set("update_time", variable.UpdateTime) + + return nil +} diff --git a/provider/terraform/resources/resource_source_repos_repository.go b/provider/terraform/resources/resource_source_repos_repository.go new file mode 100644 index 000000000000..de228dcd0294 --- /dev/null +++ b/provider/terraform/resources/resource_source_repos_repository.go @@ -0,0 +1,134 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/sourcerepo/v1" +) + +func resourceSourceRepoRepository() *schema.Resource { + return &schema.Resource{ + Create: resourceSourceRepoRepositoryCreate, + Read: resourceSourceRepoRepositoryRead, + Delete: resourceSourceRepoRepositoryDelete, + //Update: not supported, + + Importer: &schema.ResourceImporter{ + State: resourceSourceRepoRepositoryImport, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceSourceRepoRepositoryCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + repoName := d.Get("name").(string) + name := buildRepositoryName(project, repoName) + + repo := &sourcerepo.Repo{ + Name: name, + } + + parent := "projects/" + project + + 
op, err := config.clientSourceRepo.Projects.Repos.Create(parent, repo).Do() + if err != nil { + return fmt.Errorf("Error creating the Source Repo: %s", err) + } + d.SetId(op.Name) + + return nil +} + +func resourceSourceRepoRepositoryRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + repoName := d.Get("name").(string) + name := buildRepositoryName(project, repoName) + + repo, err := config.clientSourceRepo.Projects.Repos.Get(name).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Source Repo %q", d.Id())) + } + + d.Set("size", repo.Size) + d.Set("project", project) + d.Set("url", repo.Url) + + return nil +} + +func resourceSourceRepoRepositoryDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + repoName := d.Get("name").(string) + name := buildRepositoryName(project, repoName) + + _, err = config.clientSourceRepo.Projects.Repos.Delete(name).Do() + if err != nil { + return fmt.Errorf("Error deleting the Source Repo: %s", err) + } + + return nil +} + +func buildRepositoryName(project, name string) string { + repositoryName := "projects/" + project + "/repos/" + name + return repositoryName +} + +func resourceSourceRepoRepositoryImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/repos/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_spanner_database.go 
b/provider/terraform/resources/resource_spanner_database.go new file mode 100644 index 000000000000..639b0102abb4 --- /dev/null +++ b/provider/terraform/resources/resource_spanner_database.go @@ -0,0 +1,246 @@ +package google + +import ( + "fmt" + "log" + "net/http" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/googleapi" + "google.golang.org/api/spanner/v1" +) + +func resourceSpannerDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceSpannerDatabaseCreate, + Read: resourceSpannerDatabaseRead, + Delete: resourceSpannerDatabaseDelete, + Importer: &schema.ResourceImporter{ + State: resourceSpannerDatabaseImportState, + }, + + Schema: map[string]*schema.Schema{ + + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) < 2 && len(value) > 30 { + errors = append(errors, fmt.Errorf( + "%q must be between 2 and 30 characters in length", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "ddl": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func 
resourceSpannerDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, err := buildSpannerDatabaseId(d, config) + if err != nil { + return err + } + + cdr := &spanner.CreateDatabaseRequest{} + cdr.CreateStatement = fmt.Sprintf("CREATE DATABASE `%s`", id.Database) + if v, ok := d.GetOk("ddl"); ok { + cdr.ExtraStatements = convertStringArr(v.([]interface{})) + } + + op, err := config.clientSpanner.Projects.Instances.Databases.Create( + id.parentInstanceUri(), cdr).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusConflict { + return fmt.Errorf("Error, A database with name %s already exists in this instance", id.Database) + } + return fmt.Errorf("Error, failed to create database %s: %s", id.Database, err) + } + + d.SetId(id.terraformId()) + + // Wait until it's created + timeoutMins := int(d.Timeout(schema.TimeoutCreate).Minutes()) + waitErr := spannerDatabaseOperationWait(config, op, "Creating Spanner database", timeoutMins) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] Spanner database %s has been created", id.terraformId()) + return resourceSpannerDatabaseRead(d, meta) +} + +func resourceSpannerDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, err := buildSpannerDatabaseId(d, config) + if err != nil { + return err + } + + db, err := config.clientSpanner.Projects.Instances.Databases.Get( + id.databaseUri()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Spanner database %q", id.databaseUri())) + } + + d.Set("state", db.State) + d.Set("project", id.Project) + return nil +} + +func resourceSpannerDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, err := buildSpannerDatabaseId(d, config) + if err != nil { + return err + } + + _, err = 
config.clientSpanner.Projects.Instances.Databases.DropDatabase( + id.databaseUri()).Do() + if err != nil { + return fmt.Errorf("Error, failed to delete Spanner Database %s: %s", id.databaseUri(), err) + } + + d.SetId("") + return nil +} + +func resourceSpannerDatabaseImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + id, err := importSpannerDatabaseId(d.Id()) + if err != nil { + return nil, err + } + + if id.Project != "" { + d.Set("project", id.Project) + } else { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + id.Project = project + } + + d.Set("instance", id.Instance) + d.Set("name", id.Database) + d.SetId(id.terraformId()) + + return []*schema.ResourceData{d}, nil +} + +func buildSpannerDatabaseId(d *schema.ResourceData, config *Config) (*spannerDatabaseId, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + dbName := d.Get("name").(string) + instanceName := d.Get("instance").(string) + + return &spannerDatabaseId{ + Project: project, + Instance: instanceName, + Database: dbName, + }, nil +} + +type spannerDatabaseId struct { + Project string + Instance string + Database string +} + +func (s spannerDatabaseId) terraformId() string { + return fmt.Sprintf("%s/%s/%s", s.Project, s.Instance, s.Database) +} + +func (s spannerDatabaseId) parentProjectUri() string { + return fmt.Sprintf("projects/%s", s.Project) +} + +func (s spannerDatabaseId) parentInstanceUri() string { + return fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) +} + +func (s spannerDatabaseId) databaseUri() string { + return fmt.Sprintf("%s/databases/%s", s.parentInstanceUri(), s.Database) +} + +func importSpannerDatabaseId(id string) (*spannerDatabaseId, error) { + if !regexp.MustCompile("^[a-z0-9-]+/[a-z0-9-]+$").Match([]byte(id)) && + !regexp.MustCompile("^"+ProjectRegex+"/[a-z0-9-]+/[a-z0-9-]+$").Match([]byte(id)) { + return nil, 
fmt.Errorf("Invalid spanner database specifier. " + + "Expecting either {projectId}/{instanceId}/{dbId} OR " + + "{instanceId}/{dbId} (where project will be derived from the provider)") + } + + parts := strings.Split(id, "/") + if len(parts) == 2 { + log.Printf("[INFO] Spanner database import format of {instanceId}/{dbId} specified: %s", id) + return &spannerDatabaseId{Instance: parts[0], Database: parts[1]}, nil + } + + log.Printf("[INFO] Spanner database import format of {projectId}/{instanceId}/{dbId} specified: %s", id) + return extractSpannerDatabaseId(id) +} + +func extractSpannerDatabaseId(id string) (*spannerDatabaseId, error) { + if !regexp.MustCompile("^" + ProjectRegex + "/[a-z0-9-]+/[a-z0-9-]+$").Match([]byte(id)) { + return nil, fmt.Errorf("Invalid spanner id format, expecting {projectId}/{instanceId}/{databaseId}") + } + parts := strings.Split(id, "/") + return &spannerDatabaseId{ + Project: parts[0], + Instance: parts[1], + Database: parts[2], + }, nil +} diff --git a/provider/terraform/resources/resource_spanner_instance.go b/provider/terraform/resources/resource_spanner_instance.go new file mode 100644 index 000000000000..7efd896b8e0b --- /dev/null +++ b/provider/terraform/resources/resource_spanner_instance.go @@ -0,0 +1,326 @@ +package google + +import ( + "fmt" + "log" + "net/http" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/googleapi" + "google.golang.org/api/spanner/v1" +) + +func resourceSpannerInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSpannerInstanceCreate, + Read: resourceSpannerInstanceRead, + Update: resourceSpannerInstanceUpdate, + Delete: resourceSpannerInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceSpannerInstanceImportState, + }, + + Schema: map[string]*schema.Schema{ + + "config": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + 
"name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) < 6 && len(value) > 30 { + errors = append(errors, fmt.Errorf( + "%q must be between 6 and 30 characters in length", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "display_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) < 4 && len(value) > 30 { + errors = append(errors, fmt.Errorf( + "%q must be between 4 and 30 characters in length", k)) + } + return + }, + }, + + "num_nodes": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceSpannerInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + cir := &spanner.CreateInstanceRequest{ + Instance: &spanner.Instance{}, + } + + if v, ok := d.GetOk("name"); ok { + cir.InstanceId = v.(string) + } else { + cir.InstanceId = genSpannerInstanceName() + d.Set("name", cir.InstanceId) + } + + if v, ok := d.GetOk("labels"); ok { + cir.Instance.Labels = 
convertStringMap(v.(map[string]interface{})) + } + + id, err := buildSpannerInstanceId(d, config) + if err != nil { + return err + } + + cir.Instance.Config = id.instanceConfigUri(d.Get("config").(string)) + cir.Instance.DisplayName = d.Get("display_name").(string) + cir.Instance.NodeCount = int64(d.Get("num_nodes").(int)) + + op, err := config.clientSpanner.Projects.Instances.Create( + id.parentProjectUri(), cir).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusConflict { + return fmt.Errorf("Error, the name %s is not unique within project %s", id.Instance, id.Project) + } + return fmt.Errorf("Error, failed to create instance %s: %s", id.terraformId(), err) + } + + d.SetId(id.terraformId()) + + // Wait until it's created + timeoutMins := int(d.Timeout(schema.TimeoutCreate).Minutes()) + waitErr := spannerInstanceOperationWait(config, op, "Creating Spanner instance", timeoutMins) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] Spanner instance %s has been created", id.terraformId()) + return resourceSpannerInstanceRead(d, meta) +} + +func resourceSpannerInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, err := buildSpannerInstanceId(d, config) + if err != nil { + return err + } + + instance, err := config.clientSpanner.Projects.Instances.Get( + id.instanceUri()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Spanner instance %s", id.terraformId())) + } + + d.Set("config", GetResourceNameFromSelfLink(instance.Config)) + d.Set("labels", instance.Labels) + d.Set("display_name", instance.DisplayName) + d.Set("num_nodes", instance.NodeCount) + d.Set("state", instance.State) + d.Set("project", id.Project) + + return nil +} + +func resourceSpannerInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + log.Printf("[INFO] About to update Spanner 
Instance %s ", d.Id()) + uir := &spanner.UpdateInstanceRequest{ + Instance: &spanner.Instance{}, + } + + id, err := buildSpannerInstanceId(d, config) + if err != nil { + return err + } + + fieldMask := []string{} + if d.HasChange("num_nodes") { + fieldMask = append(fieldMask, "nodeCount") + uir.Instance.NodeCount = int64(d.Get("num_nodes").(int)) + } + if d.HasChange("display_name") { + fieldMask = append(fieldMask, "displayName") + uir.Instance.DisplayName = d.Get("display_name").(string) + } + if d.HasChange("labels") { + fieldMask = append(fieldMask, "labels") + uir.Instance.Labels = convertStringMap(d.Get("labels").(map[string]interface{})) + } + + uir.FieldMask = strings.Join(fieldMask, ",") + op, err := config.clientSpanner.Projects.Instances.Patch( + id.instanceUri(), uir).Do() + if err != nil { + return err + } + + // Wait until it's updated + timeoutMins := int(d.Timeout(schema.TimeoutUpdate).Minutes()) + err = spannerInstanceOperationWait(config, op, "Update Spanner Instance", timeoutMins) + if err != nil { + return err + } + + log.Printf("[INFO] Spanner Instance %s has been updated ", id.terraformId()) + return resourceSpannerInstanceRead(d, meta) +} + +func resourceSpannerInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + id, err := buildSpannerInstanceId(d, config) + if err != nil { + return err + } + + _, err = config.clientSpanner.Projects.Instances.Delete( + id.instanceUri()).Do() + if err != nil { + return fmt.Errorf("Error, failed to delete Spanner Instance %s in project %s: %s", id.Instance, id.Project, err) + } + + d.SetId("") + return nil +} + +func resourceSpannerInstanceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + id, err := importSpannerInstanceId(d.Id()) + if err != nil { + return nil, err + } + + if id.Project != "" { + d.Set("project", id.Project) + } else { + project, err := getProject(d, config) + if err != nil { + return 
// spannerInstanceId identifies a Spanner instance by its project and
// instance name, and knows how to render the resource URIs the Spanner
// API expects.
type spannerInstanceId struct {
	Project  string
	Instance string
}

// terraformId renders the "{project}/{instance}" form stored as the
// Terraform resource id.
func (s spannerInstanceId) terraformId() string {
	return s.Project + "/" + s.Instance
}

// parentProjectUri renders the project-level API path.
func (s spannerInstanceId) parentProjectUri() string {
	return "projects/" + s.Project
}

// instanceUri renders the fully-qualified instance API path.
func (s spannerInstanceId) instanceUri() string {
	return s.parentProjectUri() + "/instances/" + s.Instance
}

// instanceConfigUri renders the API path of the named instance config
// within this id's project.
func (s spannerInstanceId) instanceConfigUri(c string) string {
	return fmt.Sprintf("projects/%s/instanceConfigs/%s", s.Project, c)
}
" + + "Expecting either {projectId}/{instanceId} OR " + + "{instanceId} (where project is to be derived from that specified in provider)") + } + + parts := strings.Split(id, "/") + if len(parts) == 1 { + log.Printf("[INFO] Spanner instance import format of {instanceId} specified: %s", id) + return &spannerInstanceId{Instance: parts[0]}, nil + } + + log.Printf("[INFO] Spanner instance import format of {projectId}/{instanceId} specified: %s", id) + return extractSpannerInstanceId(id) +} + +func extractSpannerInstanceId(id string) (*spannerInstanceId, error) { + if !regexp.MustCompile("^" + ProjectRegex + "/[a-z0-9-]+$").Match([]byte(id)) { + return nil, fmt.Errorf("Invalid spanner id format, expecting {projectId}/{instanceId}") + } + parts := strings.Split(id, "/") + return &spannerInstanceId{ + Project: parts[0], + Instance: parts[1], + }, nil +} diff --git a/provider/terraform/resources/resource_sql_database.go b/provider/terraform/resources/resource_sql_database.go new file mode 100644 index 000000000000..5079c1c53dad --- /dev/null +++ b/provider/terraform/resources/resource_sql_database.go @@ -0,0 +1,248 @@ +package google + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseCreate, + Read: resourceSqlDatabaseRead, + Update: resourceSqlDatabaseUpdate, + Delete: resourceSqlDatabaseDelete, + Importer: &schema.ResourceImporter{ + State: resourceSqlDatabaseImport, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + 
Computed: true, + }, + + "charset": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "collation": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + } +} + +func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + d.SetId(instance_name + ":" + database_name) + + db := &sqladmin.Database{ + Name: database_name, + Instance: instance_name, + Charset: d.Get("charset").(string), + Collation: d.Get("collation").(string), + } + + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) + + var op *sqladmin.Operation + err = retryTime(func() error { + op, err = config.clientSqlAdmin.Databases.Insert(project, instance_name, db).Do() + return err + }, 5 /* minutes */) + + if err != nil { + return fmt.Errorf("Error, failed to insert "+ + "database %s into instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWaitTime(config, op, project, "Insert Database", int(d.Timeout(schema.TimeoutCreate).Minutes())) + + if err != nil { + return fmt.Errorf("Error, failure waiting for insertion of %s "+ + "into %s: %s", database_name, instance_name, err) + } + + return resourceSqlDatabaseRead(d, meta) +} + +func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + s := strings.Split(d.Id(), ":") + + if len(s) != 2 { + return fmt.Errorf("Error, failure importing database %s. 
"+ + "ID format is instance:name", d.Id()) + } + + instance_name := s[0] + database_name := s[1] + + var db *sqladmin.Database + err = retryTime(func() error { + db, err = config.clientSqlAdmin.Databases.Get(project, instance_name, database_name).Do() + return err + }, 5 /* minutes */) + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("SQL Database %q in instance %q", database_name, instance_name)) + } + + d.Set("instance", db.Instance) + d.Set("name", db.Name) + d.Set("self_link", db.SelfLink) + d.SetId(instance_name + ":" + database_name) + d.Set("charset", db.Charset) + d.Set("collation", db.Collation) + d.Set("project", project) + + return nil +} + +func resourceSqlDatabaseUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + + db := &sqladmin.Database{ + Name: database_name, + Instance: instance_name, + Charset: d.Get("charset").(string), + Collation: d.Get("collation").(string), + } + + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) + + var op *sqladmin.Operation + err = retryTime(func() error { + op, err = config.clientSqlAdmin.Databases.Update(project, instance_name, database_name, db).Do() + return err + }, 5 /* minutes */) + + if err != nil { + return fmt.Errorf("Error, failed to update "+ + "database %s in instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWaitTime(config, op, project, "Update Database", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + + if err != nil { + return fmt.Errorf("Error, failure waiting for update of %s "+ + "into %s: %s", database_name, instance_name, err) + } + + return resourceSqlDatabaseRead(d, meta) +} + +func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) + + var op *sqladmin.Operation + err = retryTime(func() error { + op, err = config.clientSqlAdmin.Databases.Delete(project, instance_name, database_name).Do() + return err + }, 5 /* minutes */) + + if err != nil { + return fmt.Errorf("Error, failed to delete"+ + "database %s in instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWaitTime(config, op, project, "Delete Database", int(d.Timeout(schema.TimeoutDelete).Minutes())) + + if err != nil { + return fmt.Errorf("Error, failure waiting for deletion of %s "+ + "in %s: %s", database_name, instance_name, err) + } + + return nil +} + +func resourceSqlDatabaseImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", + "instances/(?P[^/]+)/databases/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+):(?P[^/]+)", + }, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{instance}}:{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_sql_database_instance.go b/provider/terraform/resources/resource_sql_database_instance.go new file mode 100644 index 000000000000..8dcc259dea30 --- /dev/null +++ b/provider/terraform/resources/resource_sql_database_instance.go @@ -0,0 +1,989 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/customdiff" + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + + "google.golang.org/api/googleapi" + "google.golang.org/api/sqladmin/v1beta4" +) + +var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiration_time": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, +} + +func resourceSqlDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseInstanceCreate, + Read: resourceSqlDatabaseInstanceRead, + Update: resourceSqlDatabaseInstanceUpdate, + Delete: resourceSqlDatabaseInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceSqlDatabaseInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("settings.0.disk_size", isDiskShrinkage)), + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "settings": &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "tier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "activation_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // Defaults differ between first and second gen instances + Computed: true, + }, + "authorized_gae_applications": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: 
schema.TypeString}, + }, + "availability_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressFirstGen, + // Set computed instead of default because this property is for second-gen + // only. The default when not provided is ZONAL, which means no explicit HA + // configuration. + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"REGIONAL", "ZONAL"}, false), + }, + "backup_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_log_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "start_time": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // start_time is randomly assigned if not set + Computed: true, + }, + }, + }, + }, + "crash_safe_replication": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "database_flags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "disk_autoresize": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + DiffSuppressFunc: suppressFirstGen, + }, + "disk_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + // Defaults differ between first and second gen instances + Computed: true, + }, + "disk_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + // Set computed instead of default because this property is for second-gen only. 
+ Computed: true, + }, + "ip_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), + Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + }, + "ipv4_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + // Defaults differ between first and second gen instances + Computed: true, + }, + "require_ssl": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "location_preference": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "follow_gae_application": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "maintenance_window": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 7), + }, + "hour": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + }, + "update_track": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "pricing_plan": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "PER_USE", + }, + "replication_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "SYNCHRONOUS", + }, + "user_labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + + "connection_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, 
+ + "database_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "MYSQL_5_6", + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "time_to_retire": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + + "first_ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "master_instance_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "replica_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + // Returned from API on all replicas + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "client_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "client_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "connect_retry_interval": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "dump_file_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "failover_target": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "master_heartbeat_period": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + 
}, + "ssl_cipher": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "verify_server_certificate": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "server_ca_cert": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "common_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "create_time": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "expiration_time": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "sha1_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "service_account_email_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// Suppress diff with any attribute value that is not supported on 1st Generation +// Instances +func suppressFirstGen(k, old, new string, d *schema.ResourceData) bool { + if isFirstGen(d) { + log.Printf("[DEBUG] suppressing diff on %s due to 1st gen instance type", k) + return true + } + + return false +} + +// Detects whether a database is 1st Generation by inspecting the tier name +func isFirstGen(d *schema.ResourceData) bool { + settingsList := d.Get("settings").([]interface{}) + settings := settingsList[0].(map[string]interface{}) + tier := settings["tier"].(string) + + // 1st Generation databases have tiers like 'D0', as opposed to 2nd Generation which are + // prefixed with 'db' + return !regexp.MustCompile("db*").Match([]byte(tier)) +} + +func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + 
project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else { + name = resource.UniqueId() + } + + d.Set("name", name) + + instance := &sqladmin.DatabaseInstance{ + Name: name, + Region: region, + Settings: expandSqlDatabaseInstanceSettings(d.Get("settings").([]interface{}), !isFirstGen(d)), + DatabaseVersion: d.Get("database_version").(string), + MasterInstanceName: d.Get("master_instance_name").(string), + ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})), + } + + // Modifying a replica during Create can cause problems if the master is + // modified at the same time. Lock the master until we're done in order + // to prevent that. + if !sqlDatabaseIsMaster(d) { + mutexKV.Lock(instanceMutexKey(project, instance.MasterInstanceName)) + defer mutexKV.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) + } + + op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do() + if err != nil { + if googleapiError, ok := err.(*googleapi.Error); ok && googleapiError.Code == 409 { + return fmt.Errorf("Error, the name %s is unavailable because it was used recently", instance.Name) + } else { + return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) + } + } + + d.SetId(instance.Name) + + err = sqladminOperationWaitTime(config, op, project, "Create Instance", int(d.Timeout(schema.TimeoutCreate).Minutes())) + if err != nil { + d.SetId("") + return err + } + + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + + // If a default root user was created with a wildcard ('%') hostname, delete it. + // Users in a replica instance are inherited from the master instance and should be left alone. 
+ if sqlDatabaseIsMaster(d) { + var users *sqladmin.UsersListResponse + err = retryTime(func() error { + users, err = config.clientSqlAdmin.Users.List(project, instance.Name).Do() + return err + }, 5) + if err != nil { + return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err) + } + for _, u := range users.Items { + if u.Name == "root" && u.Host == "%" { + err = retry(func() error { + op, err = config.clientSqlAdmin.Users.Delete(project, instance.Name, u.Host, u.Name).Do() + if err == nil { + err = sqladminOperationWaitTime(config, op, project, "Delete default root User", int(d.Timeout(schema.TimeoutCreate).Minutes())) + } + return err + }) + if err != nil { + return fmt.Errorf("Error, failed to delete default 'root'@'*' user, but the database was created successfully: %s", err) + } + } + } + } + + return nil +} + +func expandSqlDatabaseInstanceSettings(configured []interface{}, secondGen bool) *sqladmin.Settings { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _settings := configured[0].(map[string]interface{}) + settings := &sqladmin.Settings{ + // Version is unset in Create but is set during update + SettingsVersion: int64(_settings["version"].(int)), + Tier: _settings["tier"].(string), + ForceSendFields: []string{"StorageAutoResize"}, + ActivationPolicy: _settings["activation_policy"].(string), + AvailabilityType: _settings["availability_type"].(string), + CrashSafeReplicationEnabled: _settings["crash_safe_replication"].(bool), + DataDiskSizeGb: int64(_settings["disk_size"].(int)), + DataDiskType: _settings["disk_type"].(string), + PricingPlan: _settings["pricing_plan"].(string), + ReplicationType: _settings["replication_type"].(string), + UserLabels: convertStringMap(_settings["user_labels"].(map[string]interface{})), + BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), + DatabaseFlags: 
expandDatabaseFlags(_settings["database_flags"].([]interface{})), + AuthorizedGaeApplications: expandAuthorizedGaeApplications(_settings["authorized_gae_applications"].([]interface{})), + IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), + LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), + MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), + } + + // 1st Generation instances don't support the disk_autoresize parameter + // and it defaults to true - so we shouldn't set it if this is first gen + if secondGen { + settings.StorageAutoResize = googleapi.Bool(_settings["disk_autoresize"].(bool)) + } + + return settings +} + +func expandReplicaConfiguration(configured []interface{}) *sqladmin.ReplicaConfiguration { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _replicaConfiguration := configured[0].(map[string]interface{}) + return &sqladmin.ReplicaConfiguration{ + FailoverTarget: _replicaConfiguration["failover_target"].(bool), + + // MysqlReplicaConfiguration has been flattened in the TF schema, so + // we'll keep it flat here instead of another expand method. 
+ MysqlReplicaConfiguration: &sqladmin.MySqlReplicaConfiguration{ + CaCertificate: _replicaConfiguration["ca_certificate"].(string), + ClientCertificate: _replicaConfiguration["client_certificate"].(string), + ClientKey: _replicaConfiguration["client_key"].(string), + ConnectRetryInterval: int64(_replicaConfiguration["connect_retry_interval"].(int)), + DumpFilePath: _replicaConfiguration["dump_file_path"].(string), + MasterHeartbeatPeriod: int64(_replicaConfiguration["master_heartbeat_period"].(int)), + Password: _replicaConfiguration["password"].(string), + SslCipher: _replicaConfiguration["ssl_cipher"].(string), + Username: _replicaConfiguration["username"].(string), + VerifyServerCertificate: _replicaConfiguration["verify_server_certificate"].(bool), + }, + } +} + +func expandMaintenanceWindow(configured []interface{}) *sqladmin.MaintenanceWindow { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + window := configured[0].(map[string]interface{}) + return &sqladmin.MaintenanceWindow{ + Day: int64(window["day"].(int)), + Hour: int64(window["hour"].(int)), + UpdateTrack: window["update_track"].(string), + } +} + +func expandLocationPreference(configured []interface{}) *sqladmin.LocationPreference { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _locationPreference := configured[0].(map[string]interface{}) + return &sqladmin.LocationPreference{ + FollowGaeApplication: _locationPreference["follow_gae_application"].(string), + Zone: _locationPreference["zone"].(string), + } +} + +func expandIpConfiguration(configured []interface{}) *sqladmin.IpConfiguration { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _ipConfiguration := configured[0].(map[string]interface{}) + return &sqladmin.IpConfiguration{ + Ipv4Enabled: _ipConfiguration["ipv4_enabled"].(bool), + RequireSsl: _ipConfiguration["require_ssl"].(bool), + AuthorizedNetworks: 
// expandAuthorizedGaeApplications converts the raw config list into the
// []string shape the SQL Admin API expects.
func expandAuthorizedGaeApplications(configured []interface{}) []string {
	apps := make([]string, len(configured))
	for i, raw := range configured {
		apps[i] = raw.(string)
	}
	return apps
}
d.Set("region", instance.Region) + d.Set("database_version", instance.DatabaseVersion) + d.Set("connection_name", instance.ConnectionName) + d.Set("service_account_email_address", instance.ServiceAccountEmailAddress) + + if err := d.Set("settings", flattenSettings(instance.Settings)); err != nil { + log.Printf("[WARN] Failed to set SQL Database Instance Settings") + } + + if err := d.Set("replica_configuration", flattenReplicaConfiguration(instance.ReplicaConfiguration, d)); err != nil { + log.Printf("[WARN] Failed to set SQL Database Instance Replica Configuration") + } + + ipAddresses := flattenIpAddresses(instance.IpAddresses) + if err := d.Set("ip_address", ipAddresses); err != nil { + log.Printf("[WARN] Failed to set SQL Database Instance IP Addresses") + } + + if len(ipAddresses) > 0 { + d.Set("first_ip_address", ipAddresses[0]["ip_address"]) + } + + if err := d.Set("server_ca_cert", flattenServerCaCert(instance.ServerCaCert)); err != nil { + log.Printf("[WARN] Failed to set SQL Database CA Certificate") + } + + d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")) + d.Set("project", project) + d.Set("self_link", instance.SelfLink) + d.SetId(instance.Name) + + return nil +} + +func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Update only updates the settings, so they are all we need to set. + instance := &sqladmin.DatabaseInstance{ + Settings: expandSqlDatabaseInstanceSettings(d.Get("settings").([]interface{}), !isFirstGen(d)), + } + + // Lock on the master_instance_name just in case updating any replica + // settings causes operations on the master. 
+ if v, ok := d.GetOk("master_instance_name"); ok { + mutexKV.Lock(instanceMutexKey(project, v.(string))) + defer mutexKV.Unlock(instanceMutexKey(project, v.(string))) + } + + op, err := config.clientSqlAdmin.Instances.Update(project, d.Get("name").(string), instance).Do() + if err != nil { + return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) + } + + err = sqladminOperationWaitTime(config, op, project, "Update Instance", int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if err != nil { + return err + } + + return resourceSqlDatabaseInstanceRead(d, meta) +} + +func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Lock on the master_instance_name just in case deleting a replica causes + // operations on the master. + if v, ok := d.GetOk("master_instance_name"); ok { + mutexKV.Lock(instanceMutexKey(project, v.(string))) + defer mutexKV.Unlock(instanceMutexKey(project, v.(string))) + } + + op, err := config.clientSqlAdmin.Instances.Delete(project, d.Get("name").(string)).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) + } + + err = sqladminOperationWaitTime(config, op, project, "Delete Instance", int(d.Timeout(schema.TimeoutDelete).Minutes())) + if err != nil { + return err + } + + return nil +} + +func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)"}, d, config) + + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func 
flattenSettings(settings *sqladmin.Settings) []map[string]interface{} {
	// flattenSettings converts the API Settings object into the single-element
	// list-of-maps shape the "settings" schema attribute expects.
	data := map[string]interface{}{
		"version":                     settings.SettingsVersion,
		"tier":                        settings.Tier,
		"activation_policy":           settings.ActivationPolicy,
		"authorized_gae_applications": settings.AuthorizedGaeApplications,
		"availability_type":           settings.AvailabilityType,
		"crash_safe_replication":      settings.CrashSafeReplicationEnabled,
		"disk_type":                   settings.DataDiskType,
		"disk_size":                   settings.DataDiskSizeGb,
		"pricing_plan":                settings.PricingPlan,
		"replication_type":            settings.ReplicationType,
		// user_labels is set once here; a nil map simply flattens to empty.
		"user_labels": settings.UserLabels,
	}

	// Sub-blocks are only emitted when the API actually returned them, so an
	// unset block stays absent from state instead of flattening to zero values.
	if settings.BackupConfiguration != nil {
		data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration)
	}

	if settings.DatabaseFlags != nil {
		data["database_flags"] = flattenDatabaseFlags(settings.DatabaseFlags)
	}

	if settings.IpConfiguration != nil {
		data["ip_configuration"] = flattenIpConfiguration(settings.IpConfiguration)
	}

	if settings.LocationPreference != nil {
		data["location_preference"] = flattenLocationPreference(settings.LocationPreference)
	}

	if settings.MaintenanceWindow != nil {
		data["maintenance_window"] = flattenMaintenanceWindow(settings.MaintenanceWindow)
	}

	// StorageAutoResize is a *bool; dereference only when present.
	if settings.StorageAutoResize != nil {
		data["disk_autoresize"] = *settings.StorageAutoResize
	}

	// NOTE(review): a second, redundant `if settings.UserLabels != nil`
	// assignment of "user_labels" was removed — the map literal above already
	// sets the identical value.

	return []map[string]interface{}{data}
}

// flattenBackupConfiguration converts the API BackupConfiguration into the
// single-element list-of-maps shape used by the schema.
func flattenBackupConfiguration(backupConfiguration *sqladmin.BackupConfiguration) []map[string]interface{} {
	data := map[string]interface{}{
		"binary_log_enabled": backupConfiguration.BinaryLogEnabled,
		"enabled":            backupConfiguration.Enabled,
		"start_time":         backupConfiguration.StartTime,
	}

	return []map[string]interface{}{data}
}

// flattenDatabaseFlags converts the list of API DatabaseFlags into the
// list-of-maps shape used by the schema.
func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]interface{} {
	flags := make([]map[string]interface{}, 0, len(databaseFlags))
+ for _, flag := range databaseFlags { + data := map[string]interface{}{ + "name": flag.Name, + "value": flag.Value, + } + + flags = append(flags, data) + } + + return flags +} + +func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration) interface{} { + data := map[string]interface{}{ + "ipv4_enabled": ipConfiguration.Ipv4Enabled, + "require_ssl": ipConfiguration.RequireSsl, + } + + if ipConfiguration.AuthorizedNetworks != nil { + data["authorized_networks"] = flattenAuthorizedNetworks(ipConfiguration.AuthorizedNetworks) + } + + return []map[string]interface{}{data} +} + +func flattenAuthorizedNetworks(entries []*sqladmin.AclEntry) interface{} { + networks := schema.NewSet(schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), []interface{}{}) + + for _, entry := range entries { + data := map[string]interface{}{ + "expiration_time": entry.ExpirationTime, + "name": entry.Name, + "value": entry.Value, + } + + networks.Add(data) + } + + return networks +} + +func flattenLocationPreference(locationPreference *sqladmin.LocationPreference) interface{} { + data := map[string]interface{}{ + "follow_gae_application": locationPreference.FollowGaeApplication, + "zone": locationPreference.Zone, + } + + return []map[string]interface{}{data} +} + +func flattenMaintenanceWindow(maintenanceWindow *sqladmin.MaintenanceWindow) interface{} { + data := map[string]interface{}{ + "day": maintenanceWindow.Day, + "hour": maintenanceWindow.Hour, + "update_track": maintenanceWindow.UpdateTrack, + } + + return []map[string]interface{}{data} +} + +func flattenReplicaConfiguration(replicaConfiguration *sqladmin.ReplicaConfiguration, d *schema.ResourceData) []map[string]interface{} { + rc := []map[string]interface{}{} + + if replicaConfiguration != nil { + data := map[string]interface{}{ + "failover_target": replicaConfiguration.FailoverTarget, + + // Don't attempt to assign anything from replicaConfiguration.MysqlReplicaConfiguration, + // since those fields are set on 
create and then not stored. See description at + // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances. + // Instead, set them to the values they previously had so we don't set them all to zero. + "ca_certificate": d.Get("replica_configuration.0.ca_certificate"), + "client_certificate": d.Get("replica_configuration.0.client_certificate"), + "client_key": d.Get("replica_configuration.0.client_key"), + "connect_retry_interval": d.Get("replica_configuration.0.connect_retry_interval"), + "dump_file_path": d.Get("replica_configuration.0.dump_file_path"), + "master_heartbeat_period": d.Get("replica_configuration.0.master_heartbeat_period"), + "password": d.Get("replica_configuration.0.password"), + "ssl_cipher": d.Get("replica_configuration.0.ssl_cipher"), + "username": d.Get("replica_configuration.0.username"), + "verify_server_certificate": d.Get("replica_configuration.0.verify_server_certificate"), + } + rc = append(rc, data) + } + + return rc +} + +func flattenIpAddresses(ipAddresses []*sqladmin.IpMapping) []map[string]interface{} { + var ips []map[string]interface{} + + for _, ip := range ipAddresses { + data := map[string]interface{}{ + "ip_address": ip.IpAddress, + "time_to_retire": ip.TimeToRetire, + } + + ips = append(ips, data) + } + + return ips +} + +func flattenServerCaCert(caCert *sqladmin.SslCert) []map[string]interface{} { + var cert []map[string]interface{} + + if caCert != nil { + data := map[string]interface{}{ + "cert": caCert.Cert, + "common_name": caCert.CommonName, + "create_time": caCert.CreateTime, + "expiration_time": caCert.ExpirationTime, + "sha1_fingerprint": caCert.Sha1Fingerprint, + } + + cert = append(cert, data) + } + + return cert +} + +func instanceMutexKey(project, instance_name string) string { + return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) +} + +// sqlDatabaseIsMaster returns true if the provided schema.ResourceData represents a +// master SQL Instance, and false if it is a 
replica. +func sqlDatabaseIsMaster(d *schema.ResourceData) bool { + _, ok := d.GetOk("master_instance_name") + return !ok +} diff --git a/provider/terraform/resources/resource_sql_user.go b/provider/terraform/resources/resource_sql_user.go new file mode 100644 index 000000000000..4ff1ab728d4e --- /dev/null +++ b/provider/terraform/resources/resource_sql_user.go @@ -0,0 +1,242 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlUser() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlUserCreate, + Read: resourceSqlUserRead, + Update: resourceSqlUserUpdate, + Delete: resourceSqlUserDelete, + Importer: &schema.ResourceImporter{ + State: resourceSqlUserImporter, + }, + + SchemaVersion: 1, + MigrateState: resourceSqlUserMigrateState, + + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + instance := d.Get("instance").(string) + password := d.Get("password").(string) + host := d.Get("host").(string) + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + Host: host, + } + + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) + op, err 
:= config.clientSqlAdmin.Users.Insert(project, instance, + user).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert "+ + "user %s into instance %s: %s", name, instance, err) + } + + // This will include a double-slash (//) for postgres instances, + // for which user.Host is an empty string. That's okay. + d.SetId(fmt.Sprintf("%s/%s/%s", user.Name, user.Host, user.Instance)) + + err = sqladminOperationWait(config, op, project, "Insert User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for insertion of %s "+ + "into %s: %s", name, instance, err) + } + + return resourceSqlUserRead(d, meta) +} + +func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + instance := d.Get("instance").(string) + name := d.Get("name").(string) + host := d.Get("host").(string) + + var users *sqladmin.UsersListResponse + err = nil + err = retryTime(func() error { + users, err = config.clientSqlAdmin.Users.List(project, instance).Do() + return err + }, 5) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance)) + } + + var user *sqladmin.User + for _, currentUser := range users.Items { + // The second part of this conditional is irrelevant for postgres instances because + // host and currentUser.Host will always both be empty. 
+ if currentUser.Name == name && currentUser.Host == host { + user = currentUser + break + } + } + + if user == nil { + log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + d.Set("host", user.Host) + d.Set("instance", user.Instance) + d.Set("name", user.Name) + d.Set("project", project) + d.SetId(fmt.Sprintf("%s/%s/%s", user.Name, user.Host, user.Instance)) + return nil +} + +func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + if d.HasChange("password") { + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + instance := d.Get("instance").(string) + host := d.Get("host").(string) + password := d.Get("password").(string) + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + Host: host, + } + + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) + op, err := config.clientSqlAdmin.Users.Update(project, instance, host, name, + user).Do() + + if err != nil { + return fmt.Errorf("Error, failed to update"+ + "user %s into user %s: %s", name, instance, err) + } + + err = sqladminOperationWait(config, op, project, "Insert User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for update of %s "+ + "in %s: %s", name, instance, err) + } + + return resourceSqlUserRead(d, meta) + } + + return nil +} + +func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + instance := d.Get("instance").(string) + host := d.Get("host").(string) + + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) + op, err := config.clientSqlAdmin.Users.Delete(project, instance, host, name).Do() + + if err != 
nil { + return fmt.Errorf("Error, failed to delete"+ + "user %s in instance %s: %s", name, + instance, err) + } + + err = sqladminOperationWait(config, op, project, "Delete User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for deletion of %s "+ + "in %s: %s", name, instance, err) + } + + return nil +} + +func resourceSqlUserImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + + if len(parts) == 2 { + d.Set("instance", parts[0]) + d.Set("name", parts[1]) + } else if len(parts) == 3 { + d.Set("instance", parts[0]) + d.Set("host", parts[1]) + d.Set("name", parts[2]) + } else { + return nil, fmt.Errorf("Invalid specifier. Expecting {instance}/{name} for postgres instance and {instance}/{host}/{name} for MySQL instance") + } + + return []*schema.ResourceData{d}, nil +} diff --git a/provider/terraform/resources/resource_sql_user_migrate.go b/provider/terraform/resources/resource_sql_user_migrate.go new file mode 100644 index 000000000000..7f52771ad4a6 --- /dev/null +++ b/provider/terraform/resources/resource_sql_user_migrate.go @@ -0,0 +1,39 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceSqlUserMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Google Sql User State v0; migrating to v1") + is, err := migrateSqlUserStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateSqlUserStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + name := is.Attributes["name"] + instance := is.Attributes["instance"] + 
is.ID = fmt.Sprintf("%s/%s", instance, name) + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/provider/terraform/resources/resource_storage_bucket.go b/provider/terraform/resources/resource_storage_bucket.go new file mode 100644 index 000000000000..1f94a6bb7808 --- /dev/null +++ b/provider/terraform/resources/resource_storage_bucket.go @@ -0,0 +1,814 @@ +package google + +import ( + "bytes" + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/googleapi" + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucket() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketCreate, + Read: resourceStorageBucketRead, + Update: resourceStorageBucketUpdate, + Delete: resourceStorageBucketDelete, + Importer: &schema.ResourceImporter{ + State: resourceStorageBucketStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "encryption": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_kms_key_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "force_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "location": &schema.Schema{ + Type: schema.TypeString, + Default: "US", + Optional: true, + ForceNew: true, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Removed: "Please use resource 
\"storage_bucket_acl.predefined_acl\" instead.", + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "STANDARD", + ForceNew: true, + }, + + "lifecycle_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 100, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleActionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "condition": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleConditionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "age": { + Type: schema.TypeInt, + Optional: true, + }, + "created_before": { + Type: schema.TypeString, + Optional: true, + }, + "is_live": { + Type: schema.TypeBool, + Optional: true, + }, + "matches_storage_class": { + Type: schema.TypeList, + Optional: true, + MinItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "num_newer_versions": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + + "versioning": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + + "website": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "main_page_suffix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "not_found_page": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "cors": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "origin": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "method": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "response_header": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "max_age_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "logging": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "log_object_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get the bucket and location + bucket := d.Get("name").(string) + location := d.Get("location").(string) + + // Create a bucket, setting the labels, location and name. 
+ sb := &storage.Bucket{ + Name: bucket, + Labels: expandLabels(d), + Location: location, + } + + if v, ok := d.GetOk("storage_class"); ok { + sb.StorageClass = v.(string) + } + + if err := resourceGCSBucketLifecycleCreateOrUpdate(d, sb); err != nil { + return err + } + + if v, ok := d.GetOk("versioning"); ok { + sb.Versioning = expandBucketVersioning(v) + } + + if v, ok := d.GetOk("website"); ok { + websites := v.([]interface{}) + + if len(websites) > 1 { + return fmt.Errorf("At most one website block is allowed") + } + + sb.Website = &storage.BucketWebsite{} + + website := websites[0].(map[string]interface{}) + + if v, ok := website["not_found_page"]; ok { + sb.Website.NotFoundPage = v.(string) + } + + if v, ok := website["main_page_suffix"]; ok { + sb.Website.MainPageSuffix = v.(string) + } + } + + if v, ok := d.GetOk("cors"); ok { + sb.Cors = expandCors(v.([]interface{})) + } + + if v, ok := d.GetOk("logging"); ok { + sb.Logging = expandBucketLogging(v.([]interface{})) + } + + if v, ok := d.GetOk("encryption"); ok { + sb.Encryption = expandBucketEncryption(v.([]interface{})) + } + + var res *storage.Bucket + + err = retry(func() error { + res, err = config.clientStorage.Buckets.Insert(project, sb).Do() + return err + }) + + if err != nil { + fmt.Printf("Error creating bucket %s: %v", bucket, err) + return err + } + + log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) + + d.SetId(res.Id) + return resourceStorageBucketRead(d, meta) +} + +func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sb := &storage.Bucket{} + + if d.HasChange("lifecycle_rule") { + if err := resourceGCSBucketLifecycleCreateOrUpdate(d, sb); err != nil { + return err + } + } + + if d.HasChange("versioning") { + if v, ok := d.GetOk("versioning"); ok { + sb.Versioning = expandBucketVersioning(v) + } + } + + if d.HasChange("website") { + if v, ok := d.GetOk("website"); ok { + websites := v.([]interface{}) 
+ + if len(websites) > 1 { + return fmt.Errorf("At most one website block is allowed") + } + + // Setting fields to "" to be explicit that the PATCH call will + // delete this field. + if len(websites) == 0 { + sb.Website.NotFoundPage = "" + sb.Website.MainPageSuffix = "" + } else { + website := websites[0].(map[string]interface{}) + sb.Website = &storage.BucketWebsite{} + if v, ok := website["not_found_page"]; ok { + sb.Website.NotFoundPage = v.(string) + } else { + sb.Website.NotFoundPage = "" + } + + if v, ok := website["main_page_suffix"]; ok { + sb.Website.MainPageSuffix = v.(string) + } else { + sb.Website.MainPageSuffix = "" + } + } + } + } + + if v, ok := d.GetOk("cors"); ok { + sb.Cors = expandCors(v.([]interface{})) + } + + if d.HasChange("logging") { + if v, ok := d.GetOk("logging"); ok { + sb.Logging = expandBucketLogging(v.([]interface{})) + } else { + sb.NullFields = append(sb.NullFields, "Logging") + } + } + + if d.HasChange("encryption") { + if v, ok := d.GetOk("encryption"); ok { + sb.Encryption = expandBucketEncryption(v.([]interface{})) + } else { + sb.NullFields = append(sb.NullFields, "Encryption") + } + } + + if d.HasChange("labels") { + sb.Labels = expandLabels(d) + if len(sb.Labels) == 0 { + sb.NullFields = append(sb.NullFields, "Labels") + } + + // To delete a label using PATCH, we have to explicitly set its value + // to null. 
+ old, _ := d.GetChange("labels") + for k := range old.(map[string]interface{}) { + if _, ok := sb.Labels[k]; !ok { + sb.NullFields = append(sb.NullFields, fmt.Sprintf("Labels.%s", k)) + } + } + } + + res, err := config.clientStorage.Buckets.Patch(d.Get("name").(string), sb).Do() + + if err != nil { + return err + } + + log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) + + // Assign the bucket ID as the resource ID + d.Set("self_link", res.SelfLink) + d.SetId(res.Id) + + return nil +} + +func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the bucket and acl + bucket := d.Get("name").(string) + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) + } + log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) + + // We need to get the project associated with this bucket because otherwise import + // won't work properly. That means we need to call the projects.get API with the + // project number, to get the project ID - there's no project ID field in the + // resource response. However, this requires a call to the Compute API, which + // would otherwise not be required for this resource. So, we're going to + // intentionally check whether the project is set *on the resource*. If it is, + // we will not try to fetch the project name. If it is not, either because + // the user intends to use the default provider project, or because the resource + // is currently being imported, we will read it from the API. 
+ if _, ok := d.GetOk("project"); !ok { + proj, err := config.clientCompute.Projects.Get(strconv.FormatUint(res.ProjectNumber, 10)).Do() + if err != nil { + return err + } + log.Printf("[DEBUG] Bucket %v is in project number %v, which is project ID %s.\n", res.Name, res.ProjectNumber, proj.Name) + d.Set("project", proj.Name) + } + + // Update the bucket ID according to the resource ID + d.Set("self_link", res.SelfLink) + d.Set("url", fmt.Sprintf("gs://%s", bucket)) + d.Set("storage_class", res.StorageClass) + d.Set("encryption", flattenBucketEncryption(res.Encryption)) + d.Set("location", res.Location) + d.Set("cors", flattenCors(res.Cors)) + d.Set("logging", flattenBucketLogging(res.Logging)) + d.Set("versioning", flattenBucketVersioning(res.Versioning)) + d.Set("lifecycle_rule", flattenBucketLifecycle(res.Lifecycle)) + d.Set("labels", res.Labels) + d.SetId(res.Id) + return nil +} + +func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the bucket + bucket := d.Get("name").(string) + + for { + res, err := config.clientStorage.Objects.List(bucket).Versions(true).Do() + if err != nil { + fmt.Printf("Error Objects.List failed: %v", err) + return err + } + + if len(res.Items) != 0 { + if d.Get("force_destroy").(bool) { + // purge the bucket... + log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n") + + for _, object := range res.Items { + log.Printf("[DEBUG] Found %s", object.Name) + if err := config.clientStorage.Objects.Delete(bucket, object.Name).Generation(object.Generation).Do(); err != nil { + log.Fatalf("Error trying to delete object: %s %s\n\n", object.Name, err) + } else { + log.Printf("Object deleted: %s \n\n", object.Name) + } + } + + } else { + delete_err := errors.New("Error trying to delete a bucket containing objects without `force_destroy` set to true") + log.Printf("Error! 
%s : %s\n\n", bucket, delete_err) + return delete_err + } + } else { + break // 0 items, bucket empty + } + } + + // remove empty bucket + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := config.clientStorage.Buckets.Delete(bucket).Do() + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { + return resource.RetryableError(gerr) + } + return resource.NonRetryableError(err) + }) + if err != nil { + fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err) + return err + } + log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket) + + return nil +} + +func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + d.Set("force_destroy", false) + return []*schema.ResourceData{d}, nil +} + +func expandCors(configured []interface{}) []*storage.BucketCors { + corsRules := make([]*storage.BucketCors, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + corsRule := storage.BucketCors{ + Origin: convertStringArr(data["origin"].([]interface{})), + Method: convertStringArr(data["method"].([]interface{})), + ResponseHeader: convertStringArr(data["response_header"].([]interface{})), + MaxAgeSeconds: int64(data["max_age_seconds"].(int)), + } + + corsRules = append(corsRules, &corsRule) + } + return corsRules +} + +func flattenCors(corsRules []*storage.BucketCors) []map[string]interface{} { + corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules)) + for _, corsRule := range corsRules { + data := map[string]interface{}{ + "origin": corsRule.Origin, + "method": corsRule.Method, + "response_header": corsRule.ResponseHeader, + "max_age_seconds": corsRule.MaxAgeSeconds, + } + + corsRulesSchema = append(corsRulesSchema, data) + } + return corsRulesSchema +} + +func expandBucketEncryption(configured interface{}) *storage.BucketEncryption { + encs := configured.([]interface{}) + enc := 
encs[0].(map[string]interface{}) + bucketenc := &storage.BucketEncryption{ + DefaultKmsKeyName: enc["default_kms_key_name"].(string), + } + return bucketenc +} + +func flattenBucketEncryption(enc *storage.BucketEncryption) []map[string]interface{} { + encryption := make([]map[string]interface{}, 0, 1) + + if enc == nil { + return encryption + } + + encryption = append(encryption, map[string]interface{}{ + "default_kms_key_name": enc.DefaultKmsKeyName, + }) + + return encryption +} + +func expandBucketLogging(configured interface{}) *storage.BucketLogging { + loggings := configured.([]interface{}) + logging := loggings[0].(map[string]interface{}) + + bucketLogging := &storage.BucketLogging{ + LogBucket: logging["log_bucket"].(string), + LogObjectPrefix: logging["log_object_prefix"].(string), + } + + return bucketLogging +} + +func flattenBucketLogging(bucketLogging *storage.BucketLogging) []map[string]interface{} { + loggings := make([]map[string]interface{}, 0, 1) + + if bucketLogging == nil { + return loggings + } + + logging := map[string]interface{}{ + "log_bucket": bucketLogging.LogBucket, + "log_object_prefix": bucketLogging.LogObjectPrefix, + } + + loggings = append(loggings, logging) + return loggings +} + +func expandBucketVersioning(configured interface{}) *storage.BucketVersioning { + versionings := configured.([]interface{}) + versioning := versionings[0].(map[string]interface{}) + + bucketVersioning := &storage.BucketVersioning{} + + bucketVersioning.Enabled = versioning["enabled"].(bool) + bucketVersioning.ForceSendFields = append(bucketVersioning.ForceSendFields, "Enabled") + + return bucketVersioning +} + +func flattenBucketVersioning(bucketVersioning *storage.BucketVersioning) []map[string]interface{} { + versionings := make([]map[string]interface{}, 0, 1) + + if bucketVersioning == nil { + return versionings + } + + versioning := map[string]interface{}{ + "enabled": bucketVersioning.Enabled, + } + versionings = append(versionings, versioning) + 
return versionings +} + +func flattenBucketLifecycle(lifecycle *storage.BucketLifecycle) []map[string]interface{} { + if lifecycle == nil || lifecycle.Rule == nil { + return []map[string]interface{}{} + } + + rules := make([]map[string]interface{}, 0, len(lifecycle.Rule)) + + for _, rule := range lifecycle.Rule { + rules = append(rules, map[string]interface{}{ + "action": schema.NewSet(resourceGCSBucketLifecycleRuleActionHash, []interface{}{flattenBucketLifecycleRuleAction(rule.Action)}), + "condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(rule.Condition)}), + }) + } + + return rules +} + +func flattenBucketLifecycleRuleAction(action *storage.BucketLifecycleRuleAction) map[string]interface{} { + return map[string]interface{}{ + "type": action.Type, + "storage_class": action.StorageClass, + } +} + +func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { + ruleCondition := map[string]interface{}{ + "age": int(condition.Age), + "created_before": condition.CreatedBefore, + "matches_storage_class": convertStringArrToInterface(condition.MatchesStorageClass), + "num_newer_versions": int(condition.NumNewerVersions), + } + if condition.IsLive != nil { + ruleCondition["is_live"] = *condition.IsLive + } + return ruleCondition +} + +func resourceGCSBucketLifecycleCreateOrUpdate(d *schema.ResourceData, sb *storage.Bucket) error { + if v, ok := d.GetOk("lifecycle_rule"); ok { + lifecycle_rules := v.([]interface{}) + + sb.Lifecycle = &storage.BucketLifecycle{} + sb.Lifecycle.Rule = make([]*storage.BucketLifecycleRule, 0, len(lifecycle_rules)) + + for _, raw_lifecycle_rule := range lifecycle_rules { + lifecycle_rule := raw_lifecycle_rule.(map[string]interface{}) + + target_lifecycle_rule := &storage.BucketLifecycleRule{} + + if v, ok := lifecycle_rule["action"]; ok { + if actions := v.(*schema.Set).List(); len(actions) == 1 { + action := 
actions[0].(map[string]interface{}) + + target_lifecycle_rule.Action = &storage.BucketLifecycleRuleAction{} + + if v, ok := action["type"]; ok { + target_lifecycle_rule.Action.Type = v.(string) + } + + if v, ok := action["storage_class"]; ok { + target_lifecycle_rule.Action.StorageClass = v.(string) + } + } else { + return fmt.Errorf("Exactly one action is required") + } + } + + if v, ok := lifecycle_rule["condition"]; ok { + if conditions := v.(*schema.Set).List(); len(conditions) == 1 { + condition := conditions[0].(map[string]interface{}) + + target_lifecycle_rule.Condition = &storage.BucketLifecycleRuleCondition{} + + if v, ok := condition["age"]; ok { + target_lifecycle_rule.Condition.Age = int64(v.(int)) + } + + if v, ok := condition["created_before"]; ok { + target_lifecycle_rule.Condition.CreatedBefore = v.(string) + } + + if v, ok := condition["is_live"]; ok { + target_lifecycle_rule.Condition.IsLive = googleapi.Bool(v.(bool)) + } + + if v, ok := condition["matches_storage_class"]; ok { + matches_storage_classes := v.([]interface{}) + + target_matches_storage_classes := make([]string, 0, len(matches_storage_classes)) + + for _, v := range matches_storage_classes { + target_matches_storage_classes = append(target_matches_storage_classes, v.(string)) + } + + target_lifecycle_rule.Condition.MatchesStorageClass = target_matches_storage_classes + } + + if v, ok := condition["num_newer_versions"]; ok { + target_lifecycle_rule.Condition.NumNewerVersions = int64(v.(int)) + } + } else { + return fmt.Errorf("Exactly one condition is required") + } + } + + sb.Lifecycle.Rule = append(sb.Lifecycle.Rule, target_lifecycle_rule) + } + } else { + sb.Lifecycle = &storage.BucketLifecycle{ + ForceSendFields: []string{"Rule"}, + } + } + + return nil +} + +func resourceGCSBucketLifecycleRuleActionHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) + + if 
v, ok := m["storage_class"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["age"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + + if v, ok := m["created_before"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["is_live"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + + if v, ok := m["matches_storage_class"]; ok { + matches_storage_classes := v.([]interface{}) + for _, matches_storage_class := range matches_storage_classes { + buf.WriteString(fmt.Sprintf("%s-", matches_storage_class)) + } + } + + if v, ok := m["num_newer_versions"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + + return hashcode.String(buf.String()) +} diff --git a/provider/terraform/resources/resource_storage_bucket_acl.go b/provider/terraform/resources/resource_storage_bucket_acl.go new file mode 100644 index 000000000000..e260607ac02a --- /dev/null +++ b/provider/terraform/resources/resource_storage_bucket_acl.go @@ -0,0 +1,346 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucketAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketAclCreate, + Read: resourceStorageBucketAclRead, + Update: resourceStorageBucketAclUpdate, + Delete: resourceStorageBucketAclDelete, + CustomizeDiff: resourceStorageRoleEntityCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "default_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: 
true, + ConflictsWith: []string{"role_entity"}, + }, + + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ConflictsWith: []string{"predefined_acl"}, + }, + }, + } +} + +func resourceStorageRoleEntityCustomizeDiff(diff *schema.ResourceDiff, meta interface{}) error { + keys := diff.GetChangedKeysPrefix("role_entity") + if len(keys) < 1 { + return nil + } + count := diff.Get("role_entity.#").(int) + if count < 1 { + return nil + } + state := map[string]struct{}{} + conf := map[string]struct{}{} + for i := 0; i < count; i++ { + old, new := diff.GetChange(fmt.Sprintf("role_entity.%d", i)) + state[old.(string)] = struct{}{} + conf[new.(string)] = struct{}{} + } + if len(state) != len(conf) { + return nil + } + for k, _ := range state { + if _, ok := conf[k]; !ok { + return nil + } + } + return diff.Clear("role_entity") +} + +type RoleEntity struct { + Role string + Entity string +} + +func getBucketAclId(bucket string) string { + return bucket + "-acl" +} + +func getRoleEntityPair(role_entity string) (*RoleEntity, error) { + split := strings.Split(role_entity, ":") + if len(split) != 2 { + return nil, fmt.Errorf("Error, each role entity pair must be " + + "formatted as ROLE:entity") + } + + return &RoleEntity{Role: split[0], Entity: split[1]}, nil +} + +func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + predefined_acl := "" + default_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if v, ok := d.GetOk("default_acl"); ok { + default_acl = v.(string) + } + + if len(predefined_acl) > 0 { + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: 
%v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + } + + if len(role_entity) > 0 { + current, err := config.clientStorage.BucketAccessControls.List(bucket).Do() + if err != nil { + return fmt.Errorf("Error retrieving current ACLs: %s", err) + } + for _, v := range role_entity { + pair, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + var alreadyInserted bool + for _, cur := range current.Items { + if cur.Entity == pair.Entity && cur.Role == pair.Role { + alreadyInserted = true + break + } + } + if alreadyInserted { + log.Printf("[DEBUG]: pair %s-%s already exists, not trying to insert again\n", pair.Role, pair.Entity) + continue + } + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) + + _, err = config.clientStorage.BucketAccessControls.Insert(bucket, bucketAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + } + + if len(default_acl) > 0 { + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + } + + d.SetId(getBucketAclId(bucket)) + return resourceStorageBucketAclRead(d, meta) +} + +func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + // The API offers no way to retrieve predefined ACLs, + // and we can't tell which access controls were created + // by the predefined roles, so... 
+ // + // This is, needless to say, a bad state of affairs and + // should be fixed. + if _, ok := d.GetOk("role_entity"); ok { + res, err := config.clientStorage.BucketAccessControls.List(bucket).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) + } + entities := make([]string, 0, len(res.Items)) + for _, item := range res.Items { + entities = append(entities, item.Role+":"+item.Entity) + } + + d.Set("role_entity", entities) + } else { + // if we don't set `role_entity` to nil (effectively setting it + // to empty in Terraform state), because it's computed now, + // Terraform will think it's missing from state, is supposed + // to be there, and throw up a diff for role_entity.#. So it + // must always be set in state. + d.Set("role_entity", nil) + } + + return nil +} + +func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + if d.HasChange("role_entity") { + bkt, err := config.clientStorage.Buckets.Get(bucket).Do() + if err != nil { + return fmt.Errorf("Error reading bucket %q: %v", bucket, err) + } + + project := strconv.FormatUint(bkt.ProjectNumber, 10) + o, n := d.GetChange("role_entity") + old_re, new_re := o.([]interface{}), n.([]interface{}) + + old_re_map := make(map[string]string) + for _, v := range old_re { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + old_re_map[res.Entity] = res.Role + } + + for _, v := range new_re { + pair, err := getRoleEntityPair(v.(string)) + + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + // If the old state is missing this entity, it needs to be inserted + if _, ok := old_re_map[pair.Entity]; !ok { + _, err = config.clientStorage.BucketAccessControls.Insert( + bucket, 
bucketAccessControl).Do() + } + + // Now we only store the keys that have to be removed + delete(old_re_map, pair.Entity) + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + for entity, role := range old_re_map { + if entity == fmt.Sprintf("project-owners-%s", project) && role == "OWNER" { + log.Printf("Skipping %s-%s; not deleting owner ACL.", role, entity) + continue + } + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + return resourceStorageBucketAclRead(d, meta) + } + + if d.HasChange("default_acl") { + default_acl := d.Get("default_acl").(string) + + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta) + } + + return nil +} + +func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + bkt, err := config.clientStorage.Buckets.Get(bucket).Do() + if err != nil { + return fmt.Errorf("Error retrieving bucket %q: %v", bucket, err) + } + project := strconv.FormatUint(bkt.ProjectNumber, 10) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + if res.Entity == fmt.Sprintf("project-owners-%s", project) && res.Role == "OWNER" { + log.Printf("Skipping %s-%s; not deleting owner ACL.", res.Role, res.Entity) + continue + } + + log.Printf("[DEBUG]: removing entity %s", res.Entity) + + err = 
config.clientStorage.BucketAccessControls.Delete(bucket, res.Entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) + } + } + + return nil +} diff --git a/provider/terraform/resources/resource_storage_bucket_object.go b/provider/terraform/resources/resource_storage_bucket_object.go new file mode 100644 index 000000000000..7f7563461fcc --- /dev/null +++ b/provider/terraform/resources/resource_storage_bucket_object.go @@ -0,0 +1,283 @@ +package google + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + + "github.com/hashicorp/terraform/helper/schema" + + "crypto/md5" + "encoding/base64" + "google.golang.org/api/googleapi" + "google.golang.org/api/storage/v1" + "io/ioutil" +) + +func resourceStorageBucketObject() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketObjectCreate, + Read: resourceStorageBucketObjectRead, + Delete: resourceStorageBucketObjectDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cache_control": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_disposition": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_encoding": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_language": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, + }, + + "crc32c": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "md5hash": &schema.Schema{ + Type: 
schema.TypeString, + Computed: true, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Removed: "Please use resource \"storage_object_acl.predefined_acl\" instead.", + Optional: true, + ForceNew: true, + }, + + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"content"}, + }, + + // Detect changes to local file or changes made outside of Terraform to the file stored on the server. + "detect_md5hash": &schema.Schema{ + Type: schema.TypeString, + // This field is not Computed because it needs to trigger a diff. + Optional: true, + ForceNew: true, + // Makes the diff message nicer: + // detect_md5hash: "1XcnP/iFw/hNrbhXi7QTmQ==" => "different hash" (forces new resource) + // Instead of the more confusing: + // detect_md5hash: "1XcnP/iFw/hNrbhXi7QTmQ==" => "" (forces new resource) + Default: "different hash", + // 1. Compute the md5 hash of the local file + // 2. Compare the computed md5 hash with the hash stored in Cloud Storage + // 3. Don't suppress the diff iff they don't match + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + localMd5Hash := "" + if source, ok := d.GetOkExists("source"); ok { + localMd5Hash = getFileMd5Hash(source.(string)) + } + + if content, ok := d.GetOkExists("content"); ok { + localMd5Hash = getContentMd5Hash([]byte(content.(string))) + } + + // If `source` or `content` is dynamically set, both field will be empty. 
+ // We should not suppress the diff to avoid the following error: + // 'Mismatch reason: extra attributes: detect_md5hash' + if localMd5Hash == "" { + return false + } + + // `old` is the md5 hash we retrieved from the server in the ReadFunc + if old != localMd5Hash { + return false + } + + return true + }, + }, + + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + } +} + +func objectGetId(object *storage.Object) string { + return object.Bucket + "-" + object.Name +} + +func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + var media io.Reader + + if v, ok := d.GetOk("source"); ok { + err := error(nil) + media, err = os.Open(v.(string)) + if err != nil { + return err + } + } else if v, ok := d.GetOk("content"); ok { + media = bytes.NewReader([]byte(v.(string))) + } else { + return fmt.Errorf("Error, either \"content\" or \"source\" must be specified") + } + + objectsService := storage.NewObjectsService(config.clientStorage) + object := &storage.Object{Bucket: bucket} + + if v, ok := d.GetOk("cache_control"); ok { + object.CacheControl = v.(string) + } + + if v, ok := d.GetOk("content_disposition"); ok { + object.ContentDisposition = v.(string) + } + + if v, ok := d.GetOk("content_encoding"); ok { + object.ContentEncoding = v.(string) + } + + if v, ok := d.GetOk("content_language"); ok { + object.ContentLanguage = v.(string) + } + + if v, ok := d.GetOk("content_type"); ok { + object.ContentType = v.(string) + } + + if v, ok := d.GetOk("storage_class"); ok { + object.StorageClass = v.(string) + } + + insertCall := objectsService.Insert(bucket, object) + insertCall.Name(name) + insertCall.Media(media) + + _, err := insertCall.Do() + + if err != nil { + return fmt.Errorf("Error uploading object %s: %s", name, err) + } + + return 
resourceStorageBucketObjectRead(d, meta) +} + +func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.clientStorage) + getCall := objectsService.Get(bucket, name) + + res, err := getCall.Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) + } + + d.Set("md5hash", res.Md5Hash) + d.Set("detect_md5hash", res.Md5Hash) + d.Set("crc32c", res.Crc32c) + d.Set("cache_control", res.CacheControl) + d.Set("content_disposition", res.ContentDisposition) + d.Set("content_encoding", res.ContentEncoding) + d.Set("content_language", res.ContentLanguage) + d.Set("content_type", res.ContentType) + d.Set("storage_class", res.StorageClass) + + d.SetId(objectGetId(res)) + + return nil +} + +func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.clientStorage) + + DeleteCall := objectsService.Delete(bucket, name) + err := DeleteCall.Do() + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Bucket Object %q because it's gone", name) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error deleting contents of object %s: %s", name, err) + } + + return nil +} + +func getFileMd5Hash(filename string) string { + data, err := ioutil.ReadFile(filename) + if err != nil { + log.Printf("[WARN] Failed to read source file %q. 
Cannot compute md5 hash for it.", filename) + return "" + } + + return getContentMd5Hash(data) +} + +func getContentMd5Hash(content []byte) string { + h := md5.New() + h.Write(content) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} diff --git a/provider/terraform/resources/resource_storage_default_object_acl.go b/provider/terraform/resources/resource_storage_default_object_acl.go new file mode 100644 index 000000000000..a88d08231a73 --- /dev/null +++ b/provider/terraform/resources/resource_storage_default_object_acl.go @@ -0,0 +1,187 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/storage/v1" +) + +func resourceStorageDefaultObjectAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageDefaultObjectAclCreate, + Read: resourceStorageDefaultObjectAclRead, + Update: resourceStorageDefaultObjectAclUpdate, + Delete: resourceStorageDefaultObjectAclDelete, + CustomizeDiff: resourceStorageRoleEntityCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + MinItems: 1, + }, + }, + } +} + +func resourceStorageDefaultObjectAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + roleEntity := d.Get("role_entity").([]interface{}) + + for _, v := range roleEntity { + pair, err := getRoleEntityPair(v.(string)) + + ObjectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: setting role = %s, entity = %s on bucket %s", pair.Role, pair.Entity, bucket) + + _, err = config.clientStorage.DefaultObjectAccessControls.Insert(bucket, ObjectAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error setting 
Default Object ACL for %s on bucket %s: %v", pair.Entity, bucket, err)
		}
	}
	d.SetId(bucket)
	return resourceStorageDefaultObjectAclRead(d, meta)
}

// resourceStorageDefaultObjectAclRead refreshes role_entity from the API,
// keeping only the pairs that are tracked in local state so server-managed
// entries don't cause spurious diffs.
func resourceStorageDefaultObjectAclRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	bucket := d.Get("bucket").(string)

	roleEntities := make([]interface{}, 0)
	reLocal := d.Get("role_entity").([]interface{})
	reLocalMap := make(map[string]string)
	for _, v := range reLocal {
		res, err := getRoleEntityPair(v.(string))

		if err != nil {
			return fmt.Errorf(
				"Old state has malformed Role/Entity pair: %v", err)
		}

		reLocalMap[res.Entity] = res.Role
	}

	res, err := config.clientStorage.DefaultObjectAccessControls.List(bucket).Do()

	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Storage Default Object ACL for bucket %q", d.Get("bucket").(string)))
	}

	for _, v := range res.Items {
		role := v.Role
		entity := v.Entity
		// We only store updates to the locally defined access controls
		if _, in := reLocalMap[entity]; in {
			roleEntities = append(roleEntities, fmt.Sprintf("%s:%s", role, entity))
			log.Printf("[DEBUG]: saving re %s-%s", v.Role, v.Entity)
		}
	}

	d.Set("role_entity", roleEntities)

	return nil
}

// resourceStorageDefaultObjectAclUpdate diffs the old and new role_entity
// lists: existing entities are updated, new ones inserted, and entities that
// disappeared from config are deleted.
func resourceStorageDefaultObjectAclUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	bucket := d.Get("bucket").(string)

	if !d.HasChange("role_entity") {
		return nil
	}
	o, n := d.GetChange("role_entity")
	oldRe := o.([]interface{})
	newRe := n.([]interface{})

	oldReMap := make(map[string]string)
	for _, v := range oldRe {
		res, err := getRoleEntityPair(v.(string))

		if err != nil {
			return fmt.Errorf(
				"Old state has malformed Role/Entity pair: %v", err)
		}

		oldReMap[res.Entity] = res.Role
	}

	for _, v := range newRe {
		pair, err := getRoleEntityPair(v.(string))
		// Fix: this parse error was previously unchecked, so a malformed
		// "ROLE:entity" string dereferenced a nil pair and panicked.
		if err != nil {
			return fmt.Errorf(
				"New state has malformed Role/Entity pair: %v", err)
		}

		ObjectAccessControl := &storage.ObjectAccessControl{
			Role:   pair.Role,
			Entity: 
pair.Entity, + } + + // If the old state is present for the entity, it is updated + // If the old state is missing, it is inserted + if _, ok := oldReMap[pair.Entity]; ok { + _, err = config.clientStorage.DefaultObjectAccessControls.Update( + bucket, pair.Entity, ObjectAccessControl).Do() + } else { + _, err = config.clientStorage.DefaultObjectAccessControls.Insert( + bucket, ObjectAccessControl).Do() + } + + // Now we only store the keys that have to be removed + delete(oldReMap, pair.Entity) + + if err != nil { + return fmt.Errorf("Error updating Storage Default Object ACL for bucket %s: %v", bucket, err) + } + } + + for entity := range oldReMap { + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.DefaultObjectAccessControls.Delete(bucket, entity).Do() + + if err != nil { + return fmt.Errorf("Error updating Storage Default Object ACL for bucket %s: %v", bucket, err) + } + } + + return resourceStorageDefaultObjectAclRead(d, meta) +} + +func resourceStorageDefaultObjectAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + reLocal := d.Get("role_entity").([]interface{}) + for _, v := range reLocal { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + log.Printf("[DEBUG]: removing entity %s", res.Entity) + + err = config.clientStorage.DefaultObjectAccessControls.Delete(bucket, res.Entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) + } + } + + return nil +} diff --git a/provider/terraform/resources/resource_storage_notification.go b/provider/terraform/resources/resource_storage_notification.go new file mode 100644 index 000000000000..7dfef0d56a6e --- /dev/null +++ b/provider/terraform/resources/resource_storage_notification.go @@ -0,0 +1,149 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/storage/v1" +) + +func resourceStorageNotification() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageNotificationCreate, + Read: resourceStorageNotificationRead, + Delete: resourceStorageNotificationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "payload_format": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"JSON_API_V1", "NONE"}, false), + }, + + "topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + + "custom_attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "event_types": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"}, + false), + }, + }, + + "object_name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceStorageNotificationCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + topicName := d.Get("topic").(string) + computedTopicName := getComputedTopicName("", topicName) + if computedTopicName != topicName { + project, err := getProject(d, config) + if err != nil { + return err + } + computedTopicName = getComputedTopicName(project, topicName) + } + + storageNotification := 
&storage.Notification{
		CustomAttributes: expandStringMap(d, "custom_attributes"),
		EventTypes:       convertStringSet(d.Get("event_types").(*schema.Set)),
		ObjectNamePrefix: d.Get("object_name_prefix").(string),
		PayloadFormat:    d.Get("payload_format").(string),
		Topic:            computedTopicName,
	}

	res, err := config.clientStorage.Notifications.Insert(bucket, storageNotification).Do()
	if err != nil {
		return fmt.Errorf("Error creating notification config for bucket %s: %v", bucket, err)
	}

	// The ID doubles as the import format: {bucket}/notificationConfigs/{id}.
	d.SetId(fmt.Sprintf("%s/notificationConfigs/%s", bucket, res.Id))

	return resourceStorageNotificationRead(d, meta)
}

// resourceStorageNotificationRead refreshes all notification attributes from
// the API, removing the resource from state when it no longer exists.
func resourceStorageNotificationRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	bucket, notificationID := resourceStorageNotificationParseID(d.Id())

	res, err := config.clientStorage.Notifications.Get(bucket, notificationID).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Notification configuration %s for bucket %s", notificationID, bucket))
	}

	d.Set("bucket", bucket)
	d.Set("payload_format", res.PayloadFormat)
	d.Set("topic", res.Topic)
	d.Set("object_name_prefix", res.ObjectNamePrefix)
	d.Set("event_types", res.EventTypes)
	d.Set("self_link", res.SelfLink)
	d.Set("custom_attributes", res.CustomAttributes)

	return nil
}

// resourceStorageNotificationDelete removes the notification configuration.
func resourceStorageNotificationDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	bucket, notificationID := resourceStorageNotificationParseID(d.Id())

	err := config.clientStorage.Notifications.Delete(bucket, notificationID).Do()
	if err != nil {
		return fmt.Errorf("Error deleting notification configuration %s for bucket %s: %v", notificationID, bucket, err)
	}

	return nil
}

// resourceStorageNotificationParseID splits an ID of the form
// {bucket}/notificationConfigs/{id} into (bucket, notificationID).
func resourceStorageNotificationParseID(id string) (string, string) {
	parts := strings.Split(id, "/")
	// Fix: this resource uses ImportStatePassthrough, so the ID is
	// user-supplied; indexing parts[2] without a length check panicked
	// with index-out-of-range on any malformed import string.
	if len(parts) < 3 {
		return id, ""
	}

	return parts[0], parts[2]
}
diff --git 
a/provider/terraform/resources/resource_storage_object_acl.go b/provider/terraform/resources/resource_storage_object_acl.go new file mode 100644 index 000000000000..15e03a5118d7 --- /dev/null +++ b/provider/terraform/resources/resource_storage_object_acl.go @@ -0,0 +1,253 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageObjectAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageObjectAclCreate, + Read: resourceStorageObjectAclRead, + Update: resourceStorageObjectAclUpdate, + Delete: resourceStorageObjectAclDelete, + CustomizeDiff: resourceStorageRoleEntityCustomizeDiff, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "object": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func getObjectAclId(object string) string { + return object + "-acl" +} + +func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + predefined_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if len(predefined_acl) > 0 { + if len(role_entity) > 0 { + return fmt.Errorf("Error, you cannot specify both " + + "\"predefined_acl\" and \"role_entity\"") + } + + res, err := config.clientStorage.Objects.Get(bucket, object).Do() + + if err != nil { + return fmt.Errorf("Error 
reading object %s: %v", bucket, err) + } + + res, err = config.clientStorage.Objects.Update(bucket, object, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating object %s: %v", bucket, err) + } + + return resourceStorageObjectAclRead(d, meta) + } else if len(role_entity) > 0 { + for _, v := range role_entity { + pair, err := getRoleEntityPair(v.(string)) + + objectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: setting role = %s, entity = %s", pair.Role, pair.Entity) + + _, err = config.clientStorage.ObjectAccessControls.Insert(bucket, + object, objectAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error setting ACL for %s on object %s: %v", pair.Entity, object, err) + } + } + + return resourceStorageObjectAclRead(d, meta) + } + + return fmt.Errorf("Error, you must specify either " + + "\"predefined_acl\" or \"role_entity\"") +} + +func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + // Predefined ACLs cannot easily be parsed once they have been processed + // by the GCP server + if _, ok := d.GetOk("predefined_acl"); !ok { + role_entity := make([]interface{}, 0) + re_local := d.Get("role_entity").([]interface{}) + re_local_map := make(map[string]string) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + re_local_map[res.Entity] = res.Role + } + + res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) + } + + for _, v := range res.Items { + role := v.Role + entity := v.Entity + if _, in := re_local_map[entity]; in { + 
role_entity = append(role_entity, fmt.Sprintf("%s:%s", role, entity)) + log.Printf("[DEBUG]: saving re %s-%s", role, entity) + } + } + + d.Set("role_entity", role_entity) + } else { + d.Set("role_entity", nil) + } + + d.SetId(getObjectAclId(object)) + return nil +} + +func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + if d.HasChange("role_entity") { + o, n := d.GetChange("role_entity") + old_re, new_re := o.([]interface{}), n.([]interface{}) + + old_re_map := make(map[string]string) + for _, v := range old_re { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + old_re_map[res.Entity] = res.Role + } + + for _, v := range new_re { + pair, err := getRoleEntityPair(v.(string)) + + objectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + // If the old state is missing this entity, it needs to + // be created. 
Otherwise it is updated + if _, ok := old_re_map[pair.Entity]; ok { + _, err = config.clientStorage.ObjectAccessControls.Update( + bucket, object, pair.Entity, objectAccessControl).Do() + } else { + _, err = config.clientStorage.ObjectAccessControls.Insert( + bucket, object, objectAccessControl).Do() + } + + // Now we only store the keys that have to be removed + delete(old_re_map, pair.Entity) + + if err != nil { + return fmt.Errorf("Error updating ACL for object %s: %v", bucket, err) + } + } + + for entity, _ := range old_re_map { + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for object %s: %v", bucket, err) + } + } + + return resourceStorageObjectAclRead(d, meta) + } + + return nil +} + +func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + entity := res.Entity + + log.Printf("[DEBUG]: removing entity %s", entity) + + err = config.clientStorage.ObjectAccessControls.Delete(bucket, object, + entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", + entity, err) + } + } + + return nil +} diff --git a/provider/terraform/resources/resource_usage_export_bucket.go b/provider/terraform/resources/resource_usage_export_bucket.go new file mode 100644 index 000000000000..7e87e4261b17 --- /dev/null +++ b/provider/terraform/resources/resource_usage_export_bucket.go @@ -0,0 +1,119 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceProjectUsageBucket() *schema.Resource { + return &schema.Resource{ 
+		Create: resourceProjectUsageBucketCreate,
+		Read:   resourceProjectUsageBucketRead,
+		Delete: resourceProjectUsageBucketDelete,
+		Importer: &schema.ResourceImporter{
+			State: resourceProjectUsageBucketImportState,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"bucket_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"prefix": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"project": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+// resourceProjectUsageBucketRead syncs state with the project's
+// UsageExportLocation. If usage export was disabled out of band the
+// resource is removed from state.
+func resourceProjectUsageBucketRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	p, err := config.clientCompute.Projects.Get(project).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Project data for project %s", project))
+	}
+
+	if p.UsageExportLocation == nil {
+		log.Printf("[WARN] Removing usage export location resource %s because it's not enabled server-side.", project)
+		d.SetId("")
+		// Must stop here: the dereferences below would panic on the nil
+		// UsageExportLocation.
+		return nil
+	}
+
+	d.Set("project", project)
+	d.Set("prefix", p.UsageExportLocation.ReportNamePrefix)
+	d.Set("bucket_name", p.UsageExportLocation.BucketName)
+	return nil
+}
+
+// resourceProjectUsageBucketCreate enables usage export for the project and
+// waits for the operation before reading state back.
+func resourceProjectUsageBucketCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	op, err := config.clientCompute.Projects.SetUsageExportBucket(project, &compute.UsageExportLocation{
+		ReportNamePrefix: d.Get("prefix").(string),
+		BucketName:       d.Get("bucket_name").(string),
+	}).Do()
+	if err != nil {
+		return err
+	}
+	d.SetId(project)
+	err = computeOperationWait(config.clientCompute, op, project, "Setting usage export bucket.")
+	if err != nil {
+		// The operation failed, so the ID set above must be rolled back.
+		d.SetId("")
+		return err
+	}
+
+	d.Set("project", project)
+
+	return resourceProjectUsageBucketRead(d, meta)
+}
+
+// resourceProjectUsageBucketDelete disables usage export by setting the
+// export bucket to nil.
+func resourceProjectUsageBucketDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	op, err := config.clientCompute.Projects.SetUsageExportBucket(project, nil).Do()
+	if err != nil {
+		return err
+	}
+
+	err = computeOperationWait(config.clientCompute, op, project,
+		"Setting usage export bucket to nil, automatically disabling usage export.")
+	if err != nil {
+		return err
+	}
+	d.SetId("")
+
+	return nil
+}
+
+// resourceProjectUsageBucketImportState imports by project id; the remaining
+// fields are populated by the subsequent Read.
+func resourceProjectUsageBucketImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	project := d.Id()
+	d.Set("project", project)
+	return []*schema.ResourceData{d}, nil
+}
diff --git a/provider/terraform/resources/resourcemanager_operation.go b/provider/terraform/resources/resourcemanager_operation.go
new file mode 100644
index 000000000000..239730ed3309
--- /dev/null
+++ b/provider/terraform/resources/resourcemanager_operation.go
@@ -0,0 +1,86 @@
+package google
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"google.golang.org/api/cloudresourcemanager/v1"
+	resourceManagerV2Beta1 "google.golang.org/api/cloudresourcemanager/v2beta1"
+)
+
+// ResourceManagerOperationWaiter polls a Resource Manager operation until
+// its Done flag flips to true.
+type ResourceManagerOperationWaiter struct {
+	Service *cloudresourcemanager.Service
+	Op      *cloudresourcemanager.Operation
+}
+
+// RefreshFunc re-fetches the operation and reports its Done flag
+// ("true"/"false") as the state string.
+func (w *ResourceManagerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		op, err := w.Service.Operations.Get(w.Op.Name).Do()
+
+		if err != nil {
+			return nil, "", err
+		}
+
+		log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name)
+
+		return op, fmt.Sprint(op.Done), nil
+	}
+}
+
+// Conf builds the StateChangeConf that waits for Done to go false -> true.
+func (w *ResourceManagerOperationWaiter) Conf() *resource.StateChangeConf {
+	return &resource.StateChangeConf{
+		Pending: []string{"false"},
+		Target:  []string{"true"},
+		Refresh: w.RefreshFunc(),
+	}
+}
+
+func 
resourceManagerOperationWait(service *cloudresourcemanager.Service, op *cloudresourcemanager.Operation, activity string) error {
+	// Default timeout of 4 minutes for callers that don't care to choose one.
+	return resourceManagerOperationWaitTime(service, op, activity, 4)
+}
+
+// resourceManagerOperationWaitTime blocks until the given v1 Resource
+// Manager operation completes or timeoutMin minutes elapse. A non-nil
+// op.Error (before or after waiting) is surfaced as a Go error.
+func resourceManagerOperationWaitTime(service *cloudresourcemanager.Service, op *cloudresourcemanager.Operation, activity string, timeoutMin int) error {
+	// Fast path: the operation may already be finished when handed to us.
+	if op.Done {
+		if op.Error != nil {
+			return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message)
+		}
+		return nil
+	}
+
+	w := &ResourceManagerOperationWaiter{
+		Service: service,
+		Op:      op,
+	}
+
+	state := w.Conf()
+	state.Delay = 10 * time.Second
+	state.Timeout = time.Duration(timeoutMin) * time.Minute
+	state.MinTimeout = 2 * time.Second
+	opRaw, err := state.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for %s: %s", activity, err)
+	}
+
+	// Re-check the final operation object for a server-side error.
+	op = opRaw.(*cloudresourcemanager.Operation)
+	if op.Error != nil {
+		return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message)
+	}
+
+	return nil
+}
+
+// resourceManagerV2Beta1OperationWait is the v2beta1 analogue with the
+// default 4-minute timeout.
+func resourceManagerV2Beta1OperationWait(service *cloudresourcemanager.Service, op *resourceManagerV2Beta1.Operation, activity string) error {
+	return resourceManagerV2Beta1OperationWaitTime(service, op, activity, 4)
+}
+
+// resourceManagerV2Beta1OperationWaitTime converts a v2beta1 operation to
+// its v1 representation and delegates to the v1 waiter.
+func resourceManagerV2Beta1OperationWaitTime(service *cloudresourcemanager.Service, op *resourceManagerV2Beta1.Operation, activity string, timeoutMin int) error {
+	opV1 := &cloudresourcemanager.Operation{}
+	err := Convert(op, opV1)
+	if err != nil {
+		return err
+	}
+
+	return resourceManagerOperationWaitTime(service, opV1, activity, timeoutMin)
+}
diff --git a/templates/terraform/data_sources/data_source_dns_managed_zone.go b/templates/terraform/data_sources/data_source_dns_managed_zone.go
new file mode 100644
index 000000000000..05f96458e9e5
--- /dev/null
+++ b/templates/terraform/data_sources/data_source_dns_managed_zone.go
@@ -0,0 +1,65 @@
+package google
+
+import "github.com/hashicorp/terraform/helper/schema"
+
+// dataSourceDnsManagedZone reads an existing Cloud DNS managed zone by name.
+func dataSourceDnsManagedZone() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceDnsManagedZoneRead,
+
+		Schema: map[string]*schema.Schema{
+			"dns_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"description": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+
+			"name_servers": &schema.Schema{
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+
+			// Google Cloud DNS ManagedZone resources do not have a SelfLink attribute.
+
+			"project": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+		},
+	}
+}
+
+// dataSourceDnsManagedZoneRead fetches the zone and mirrors its attributes
+// into state. The data source id is the zone name.
+func dataSourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	d.SetId(d.Get("name").(string))
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	zone, err := config.clientDns.ManagedZones.Get(
+		project, d.Id()).Do()
+	if err != nil {
+		return err
+	}
+
+	d.Set("name_servers", zone.NameServers)
+	d.Set("name", zone.Name)
+	d.Set("dns_name", zone.DnsName)
+	d.Set("description", zone.Description)
+
+	return nil
+}
diff --git a/templates/terraform/data_sources/data_source_google_compute_address.go b/templates/terraform/data_sources/data_source_google_compute_address.go
new file mode 100644
index 000000000000..137c4157afff
--- /dev/null
+++ b/templates/terraform/data_sources/data_source_google_compute_address.go
@@ -0,0 +1,143 @@
+package google
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+var (
+	computeAddressIdTemplate = "projects/%s/regions/%s/addresses/%s"
+	computeAddressLinkRegex  = regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$")
+)
+
+// dataSourceGoogleComputeAddress looks up an existing regional compute
+// address by name.
+func dataSourceGoogleComputeAddress() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceGoogleComputeAddressRead,
+
+		Schema: map[string]*schema.Schema{
"name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + address, err := config.clientCompute.Addresses.Get(project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Address Not Found : %s", name)) + } + + d.Set("address", address.Address) + d.Set("status", address.Status) + d.Set("self_link", address.SelfLink) + d.Set("project", project) + d.Set("region", region) + + d.SetId(strconv.FormatUint(uint64(address.Id), 10)) + return nil +} + +type computeAddressId struct { + Project string + Region string + Name string +} + +func (s computeAddressId) canonicalId() string { + return fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) +} + +func parseComputeAddressId(id string, config *Config) (*computeAddressId, error) { + var parts []string + if computeAddressLinkRegex.MatchString(id) { + parts = computeAddressLinkRegex.FindStringSubmatch(id) + + return &computeAddressId{ + Project: parts[1], + Region: parts[2], + Name: parts[3], + }, nil + } else { + parts = strings.Split(id, "/") + } + + if len(parts) == 3 { + return &computeAddressId{ + Project: parts[0], + Region: parts[1], + Name: parts[2], + }, nil + } else if len(parts) == 2 { + // Project is 
optional. + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") + } + + return &computeAddressId{ + Project: config.Project, + Region: parts[0], + Name: parts[1], + }, nil + } else if len(parts) == 1 { + // Project and region is optional + if config.Project == "" { + return nil, fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") + } + if config.Region == "" { + return nil, fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") + } + + return &computeAddressId{ + Project: config.Project, + Region: config.Region, + Name: parts[0], + }, nil + } + + return nil, fmt.Errorf("Invalid compute address id. Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") +} diff --git a/templates/terraform/resources/resource_compute_network.go b/templates/terraform/resources/resource_compute_network.go new file mode 100644 index 000000000000..c30054e9901f --- /dev/null +++ b/templates/terraform/resources/resource_compute_network.go @@ -0,0 +1,213 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeNetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkCreate, + Read: resourceComputeNetworkRead, + Update: resourceComputeNetworkUpdate, + Delete: resourceComputeNetworkDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "auto_create_subnetworks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + 
"routing_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + // This needs to remain deprecated until the API is retired + Deprecated: "Please use google_compute_subnetwork resources instead.", + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // + // Possible modes: + // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be + // set (enforced by ConflictsWith schema attribute) + // - 2 Distributed Mode - Create a new generation network that supports subnetworks: + // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region + // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, + // + autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) + if autoCreateSubnetworks && d.Get("ipv4_range").(string) != "" { + return fmt.Errorf("ipv4_range can't be set if auto_create_subnetworks is true.") + } + + // Build the network parameter + network := &compute.Network{ + Name: d.Get("name").(string), + AutoCreateSubnetworks: autoCreateSubnetworks, + Description: d.Get("description").(string), + } + + if v, ok := d.GetOk("routing_mode"); ok { + routingConfig := &compute.NetworkRoutingConfig{ + RoutingMode: v.(string), + } + network.RoutingConfig = routingConfig + } + + if v, ok := d.GetOk("ipv4_range"); ok { + log.Printf("[DEBUG] Setting IPv4Range (%#v) for 
legacy network mode", v.(string)) + network.IPv4Range = v.(string) + } else { + // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise + // google will create a network in legacy mode. + network.ForceSendFields = []string{"AutoCreateSubnetworks"} + } + log.Printf("[DEBUG] Network insert request: %#v", network) + op, err := config.clientCompute.Networks.Insert( + project, network).Do() + if err != nil { + return fmt.Errorf("Error creating network: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(network.Name) + + err = computeOperationWait(config.clientCompute, op, project, "Creating Network") + if err != nil { + return err + } + + return resourceComputeNetworkRead(d, meta) +} + +func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + network, err := config.clientCompute.Networks.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string))) + } + + routingConfig := network.RoutingConfig + + d.Set("routing_mode", routingConfig.RoutingMode) + d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("ipv4_range", network.IPv4Range) + d.Set("self_link", network.SelfLink) + d.Set("name", network.Name) + d.Set("description", network.Description) + d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks) + d.Set("project", project) + + return nil +} + +func resourceComputeNetworkUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientCompute.Networks.Patch(project, d.Id(), &compute.Network{ + RoutingConfig: &compute.NetworkRoutingConfig{ + RoutingMode: d.Get("routing_mode").(string), + }, + }).Do() + + if err != nil { + return fmt.Errorf("Error updating network: %s", 
err) + } + + err = computeSharedOperationWait(config.clientCompute, op, project, "UpdateNetwork") + if err != nil { + return err + } + + return resourceComputeNetworkRead(d, meta) +} + +func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + return deleteComputeNetwork(project, d.Id(), config) +} + +func deleteComputeNetwork(project, network string, config *Config) error { + op, err := config.clientCompute.Networks.Delete( + project, network).Do() + if err != nil { + return fmt.Errorf("Error deleting network: %s", err) + } + + err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting Network", 10) + if err != nil { + return err + } + return nil +}