diff --git a/google/provider.go b/google/provider.go index 433b6555197..0c2511d485e 100644 --- a/google/provider.go +++ b/google/provider.go @@ -445,9 +445,9 @@ func Provider() terraform.ResourceProvider { return provider } -// Generated resources: 82 +// Generated resources: 83 // Generated IAM resources: 39 -// Total generated resources: 121 +// Total generated resources: 122 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -526,6 +526,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_compute_url_map": resourceComputeUrlMap(), "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), "google_container_analysis_note": resourceContainerAnalysisNote(), + "google_dataproc_autoscaling_policy": resourceDataprocAutoscalingPolicy(), "google_dns_managed_zone": resourceDNSManagedZone(), "google_filestore_instance": resourceFilestoreInstance(), "google_firestore_index": resourceFirestoreIndex(), diff --git a/google/resource_dataproc_autoscaling_policy.go b/google/resource_dataproc_autoscaling_policy.go new file mode 100644 index 00000000000..e159d4b7223 --- /dev/null +++ b/google/resource_dataproc_autoscaling_policy.go @@ -0,0 +1,784 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceDataprocAutoscalingPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocAutoscalingPolicyCreate, + Read: resourceDataprocAutoscalingPolicyRead, + Update: resourceDataprocAutoscalingPolicyUpdate, + Delete: resourceDataprocAutoscalingPolicyDelete, + + Importer: &schema.ResourceImporter{ + State: resourceDataprocAutoscalingPolicyImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "policy_id": { + Type: schema.TypeString, + Required: true, + Description: `The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), +and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between +3 and 50 characters.`, + }, + "basic_algorithm": { + Type: schema.TypeList, + Optional: true, + Description: `Basic algorithm for autoscaling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "yarn_config": { + Type: schema.TypeList, + Required: true, + Description: `YARN autoscaling configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "graceful_decommission_timeout": { + Type: schema.TypeString, + Required: true, + Description: `Timeout for YARN graceful decommissioning of Node Managers. Specifies the +duration to wait for jobs to complete before forcefully removing workers +(and potentially interrupting jobs). Only applicable to downscaling operations. 
+ +Bounds: [0s, 1d].`, + }, + "scale_down_factor": { + Type: schema.TypeFloat, + Required: true, + Description: `Fraction of average pending memory in the last cooldown period for which to +remove workers. A scale-down factor of 1 will result in scaling down so that there +is no available memory remaining after the update (more aggressive scaling). +A scale-down factor of 0 disables removing workers, which can be beneficial for +autoscaling a single job. + +Bounds: [0.0, 1.0].`, + }, + "scale_up_factor": { + Type: schema.TypeFloat, + Required: true, + Description: `Fraction of average pending memory in the last cooldown period for which to +add workers. A scale-up factor of 1.0 will result in scaling up so that there +is no pending memory remaining after the update (more aggressive scaling). +A scale-up factor closer to 0 will result in a smaller magnitude of scaling up +(less aggressive scaling). + +Bounds: [0.0, 1.0].`, + }, + "scale_down_min_worker_fraction": { + Type: schema.TypeFloat, + Optional: true, + Description: `Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. +For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must +recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 +means the autoscaler will scale down on any recommended change. + +Bounds: [0.0, 1.0]. Default: 0.0.`, + Default: 0.0, + }, + "scale_up_min_worker_fraction": { + Type: schema.TypeFloat, + Optional: true, + Description: `Minimum scale-up threshold as a fraction of total cluster size before scaling +occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler +must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of +0 means the autoscaler will scale up on any recommended change. + +Bounds: [0.0, 1.0]. Default: 0.0.`, + Default: 0.0, + }, + }, + }, + }, + "cooldown_period": { + Type: schema.TypeString, + Optional: true, + Description: `Duration between scaling events. A scaling period starts after the +update operation from the previous event has completed. + +Bounds: [2m, 1d]. Default: 2m.`, + Default: "120s", + }, + }, + }, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location where the autoscaling poicy should reside. +The default value is 'global'.`, + Default: "global", + }, + "secondary_worker_config": { + Type: schema.TypeList, + Optional: true, + Description: `Describes how the autoscaler will operate for secondary workers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum number of instances for this group. Note that by default, clusters will not use +secondary workers. Required for secondary workers if the minimum secondary instances is set. +Bounds: [minInstances, ). Defaults to 0.`, + Default: 0, + AtLeastOneOf: []string{"secondary_worker_config.0.min_instances", "secondary_worker_config.0.max_instances", "secondary_worker_config.0.weight"}, + }, + "min_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of instances for this group. Bounds: [0, maxInstances]. 
Defaults to 0.`, + Default: 0, + AtLeastOneOf: []string{"secondary_worker_config.0.min_instances", "secondary_worker_config.0.max_instances", "secondary_worker_config.0.weight"}, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + Description: `Weight for the instance group, which is used to determine the fraction of total workers +in the cluster from this instance group. For example, if primary workers have weight 2, +and secondary workers have weight 1, the cluster will have approximately 2 primary workers +for each secondary worker. + +The cluster may not reach the specified balance if constrained by min/max bounds or other +autoscaling settings. For example, if maxInstances for secondary workers is 0, then only +primary workers will be added. The cluster can also be out of balance when created. + +If weight is not set on any instance group, the cluster will default to equal weight for +all groups: the cluster will attempt to maintain an equal number of workers in each group +within the configured size bounds for each group. If weight is set for one group only, +the cluster will default to zero weight on the unset group. For example if weight is set +only on primary workers, the cluster will use primary workers only and no secondary workers.`, + Default: 1, + AtLeastOneOf: []string{"secondary_worker_config.0.min_instances", "secondary_worker_config.0.max_instances", "secondary_worker_config.0.weight"}, + }, + }, + }, + }, + "worker_config": { + Type: schema.TypeList, + Optional: true, + Description: `Describes how the autoscaler will operate for primary workers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_instances": { + Type: schema.TypeInt, + Required: true, + Description: `Maximum number of instances for this group.`, + }, + "min_instances": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.`, + Default: 2, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + Description: `Weight for the instance group, which is used to determine the fraction of total workers +in the cluster from this instance group. For example, if primary workers have weight 2, +and secondary workers have weight 1, the cluster will have approximately 2 primary workers +for each secondary worker. + +The cluster may not reach the specified balance if constrained by min/max bounds or other +autoscaling settings. For example, if maxInstances for secondary workers is 0, then only +primary workers will be added. The cluster can also be out of balance when created. + +If weight is not set on any instance group, the cluster will default to equal weight for +all groups: the cluster will attempt to maintain an equal number of workers in each group +within the configured size bounds for each group. If weight is set for one group only, +the cluster will default to zero weight on the unset group. 
For example if weight is set +only on primary workers, the cluster will use primary workers only and no secondary workers.`, + Default: 1, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The "resource name" of the autoscaling policy.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceDataprocAutoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + obj := make(map[string]interface{}) + idProp, err := expandDataprocAutoscalingPolicyPolicyId(d.Get("policy_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("policy_id"); !isEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + workerConfigProp, err := expandDataprocAutoscalingPolicyWorkerConfig(d.Get("worker_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("worker_config"); !isEmptyValue(reflect.ValueOf(workerConfigProp)) && (ok || !reflect.DeepEqual(v, workerConfigProp)) { + obj["workerConfig"] = workerConfigProp + } + secondaryWorkerConfigProp, err := expandDataprocAutoscalingPolicySecondaryWorkerConfig(d.Get("secondary_worker_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secondary_worker_config"); !isEmptyValue(reflect.ValueOf(secondaryWorkerConfigProp)) && (ok || !reflect.DeepEqual(v, secondaryWorkerConfigProp)) { + obj["secondaryWorkerConfig"] = secondaryWorkerConfigProp + } + basicAlgorithmProp, err := expandDataprocAutoscalingPolicyBasicAlgorithm(d.Get("basic_algorithm"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_algorithm"); !isEmptyValue(reflect.ValueOf(basicAlgorithmProp)) && (ok || !reflect.DeepEqual(v, basicAlgorithmProp)) { + obj["basicAlgorithm"] = basicAlgorithmProp + } + + url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new AutoscalingPolicy: %#v", obj) + project, err := getProject(d, config) + if err != nil { + return err + } + res, err := sendRequestWithTimeout(config, "POST", project, url, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating AutoscalingPolicy: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating AutoscalingPolicy %q: %#v", d.Id(), res) + + return resourceDataprocAutoscalingPolicyRead(d, meta) +} + +func resourceDataprocAutoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + res, err := sendRequest(config, "GET", project, url, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DataprocAutoscalingPolicy %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading AutoscalingPolicy: %s", err) + } + + if err := d.Set("policy_id", 
flattenDataprocAutoscalingPolicyPolicyId(res["id"], d)); err != nil { + return fmt.Errorf("Error reading AutoscalingPolicy: %s", err) + } + if err := d.Set("name", flattenDataprocAutoscalingPolicyName(res["name"], d)); err != nil { + return fmt.Errorf("Error reading AutoscalingPolicy: %s", err) + } + if err := d.Set("worker_config", flattenDataprocAutoscalingPolicyWorkerConfig(res["workerConfig"], d)); err != nil { + return fmt.Errorf("Error reading AutoscalingPolicy: %s", err) + } + if err := d.Set("secondary_worker_config", flattenDataprocAutoscalingPolicySecondaryWorkerConfig(res["secondaryWorkerConfig"], d)); err != nil { + return fmt.Errorf("Error reading AutoscalingPolicy: %s", err) + } + if err := d.Set("basic_algorithm", flattenDataprocAutoscalingPolicyBasicAlgorithm(res["basicAlgorithm"], d)); err != nil { + return fmt.Errorf("Error reading AutoscalingPolicy: %s", err) + } + + return nil +} + +func resourceDataprocAutoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + idProp, err := expandDataprocAutoscalingPolicyPolicyId(d.Get("policy_id"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("policy_id"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + workerConfigProp, err := expandDataprocAutoscalingPolicyWorkerConfig(d.Get("worker_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("worker_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, workerConfigProp)) { + obj["workerConfig"] = workerConfigProp + } + secondaryWorkerConfigProp, err := expandDataprocAutoscalingPolicySecondaryWorkerConfig(d.Get("secondary_worker_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("secondary_worker_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, secondaryWorkerConfigProp)) { + obj["secondaryWorkerConfig"] = secondaryWorkerConfigProp + } + basicAlgorithmProp, err := expandDataprocAutoscalingPolicyBasicAlgorithm(d.Get("basic_algorithm"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("basic_algorithm"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, basicAlgorithmProp)) { + obj["basicAlgorithm"] = basicAlgorithmProp + } + + url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating AutoscalingPolicy %q: %#v", d.Id(), obj) + _, err = sendRequestWithTimeout(config, "PUT", project, url, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating AutoscalingPolicy %q: %s", d.Id(), err) + } + + return resourceDataprocAutoscalingPolicyRead(d, meta) +} + +func resourceDataprocAutoscalingPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting AutoscalingPolicy %q", d.Id()) + + res, err := sendRequestWithTimeout(config, "DELETE", project, url, obj, 
d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "AutoscalingPolicy") + } + + log.Printf("[DEBUG] Finished deleting AutoscalingPolicy %q: %#v", d.Id(), res) + return nil +} + +func resourceDataprocAutoscalingPolicyImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenDataprocAutoscalingPolicyPolicyId(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyName(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyWorkerConfig(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_instances"] = + flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(original["minInstances"], d) + transformed["max_instances"] = + flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(original["maxInstances"], d) + transformed["weight"] = + flattenDataprocAutoscalingPolicyWorkerConfigWeight(original["weight"], d) + return []interface{}{transformed} +} +func flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. 
+ } + return v +} + +func flattenDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_instances"] = + flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(original["minInstances"], d) + transformed["max_instances"] = + flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(original["maxInstances"], d) + transformed["weight"] = + flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(original["weight"], d) + return []interface{}{transformed} +} +func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. 
+ } + return v +} + +func flattenDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["cooldown_period"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(original["cooldownPeriod"], d) + transformed["yarn_config"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(original["yarnConfig"], d) + return []interface{}{transformed} +} +func flattenDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["graceful_decommission_timeout"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(original["gracefulDecommissionTimeout"], d) + transformed["scale_up_factor"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(original["scaleUpFactor"], d) + transformed["scale_down_factor"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(original["scaleDownFactor"], d) + transformed["scale_up_min_worker_fraction"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(original["scaleUpMinWorkerFraction"], d) + transformed["scale_down_min_worker_fraction"] = + flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(original["scaleDownMinWorkerFraction"], d) + return []interface{}{transformed} +} +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func expandDataprocAutoscalingPolicyPolicyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyWorkerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinInstances, err := expandDataprocAutoscalingPolicyWorkerConfigMinInstances(original["min_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { + transformed["minInstances"] = transformedMinInstances + } + + transformedMaxInstances, err := expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(original["max_instances"], d, config) + if err != nil { + return nil, err + 
} else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { + transformed["maxInstances"] = transformedMaxInstances + } + + transformedWeight, err := expandDataprocAutoscalingPolicyWorkerConfigWeight(original["weight"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + transformed["weight"] = transformedWeight + } + + return transformed, nil +} + +func expandDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMinInstances, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(original["min_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { + transformed["minInstances"] = transformedMinInstances + } + + transformedMaxInstances, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(original["max_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { + transformed["maxInstances"] = transformedMaxInstances + } + + transformedWeight, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(original["weight"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { + transformed["weight"] = transformedWeight + } + + return transformed, nil +} + +func expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedCooldownPeriod, err := expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(original["cooldown_period"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !isEmptyValue(val) { + transformed["cooldownPeriod"] = transformedCooldownPeriod + } + + transformedYarnConfig, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(original["yarn_config"], 
d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedYarnConfig); val.IsValid() && !isEmptyValue(val) { + transformed["yarnConfig"] = transformedYarnConfig + } + + return transformed, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedGracefulDecommissionTimeout, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(original["graceful_decommission_timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGracefulDecommissionTimeout); val.IsValid() && !isEmptyValue(val) { + transformed["gracefulDecommissionTimeout"] = transformedGracefulDecommissionTimeout + } + + transformedScaleUpFactor, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(original["scale_up_factor"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleUpFactor); val.IsValid() && !isEmptyValue(val) { + transformed["scaleUpFactor"] = transformedScaleUpFactor + } + + transformedScaleDownFactor, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(original["scale_down_factor"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleDownFactor); val.IsValid() && !isEmptyValue(val) { + transformed["scaleDownFactor"] = transformedScaleDownFactor + } + + transformedScaleUpMinWorkerFraction, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(original["scale_up_min_worker_fraction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleUpMinWorkerFraction); val.IsValid() && !isEmptyValue(val) { + transformed["scaleUpMinWorkerFraction"] = transformedScaleUpMinWorkerFraction + } + + transformedScaleDownMinWorkerFraction, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(original["scale_down_min_worker_fraction"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScaleDownMinWorkerFraction); val.IsValid() && !isEmptyValue(val) { + transformed["scaleDownMinWorkerFraction"] = transformedScaleDownMinWorkerFraction + } + + return transformed, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_dataproc_autoscaling_policy_generated_test.go b/google/resource_dataproc_autoscaling_policy_generated_test.go new file mode 100644 index 00000000000..2931e9b61eb --- /dev/null +++ b/google/resource_dataproc_autoscaling_policy_generated_test.go @@ -0,0 +1,107 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +func TestAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(10), + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDataprocAutoscalingPolicyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyExample(context), + }, + { + ResourceName: "google_dataproc_autoscaling_policy.asp", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDataprocAutoscalingPolicy_dataprocAutoscalingPolicyExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-dataproc-test-%{random_suffix}" + region = "us-central1" + + cluster_config { + autoscaling_config { + policy_uri = google_dataproc_autoscaling_policy.asp.name + } + } +} + +resource "google_dataproc_autoscaling_policy" "asp" { + policy_id = "tf-dataproc-test-%{random_suffix}" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} +`, context) +} + +func testAccCheckDataprocAutoscalingPolicyDestroy(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dataproc_autoscaling_policy" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := testAccProvider.Meta().(*Config) + + url, err := replaceVarsForTest(config, rs, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") + if err != nil { + return err + } + + _, err = sendRequest(config, "GET", "", url, nil) + if err == nil { + return fmt.Errorf("DataprocAutoscalingPolicy still exists at %s", url) + } + } + + return nil +} diff --git a/google/resource_dataproc_cluster.go b/google/resource_dataproc_cluster.go index e4db30de6f3..ad1f4411aa7 100644 --- a/google/resource_dataproc_cluster.go +++ b/google/resource_dataproc_cluster.go @@ -50,6 +50,7 @@ var ( "cluster_config.0.software_config", "cluster_config.0.initialization_action", "cluster_config.0.encryption_config", + "cluster_config.0.autoscaling_config", } ) @@ -391,6 +392,20 @@ func 
resourceDataprocCluster() *schema.Resource { }, }, }, + "autoscaling_config": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: clusterConfigKeys, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy_uri": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, }, }, }, @@ -616,6 +631,10 @@ func expandClusterConfig(d *schema.ResourceData, config *Config) (*dataproc.Clus conf.EncryptionConfig = expandEncryptionConfig(cfg) } + if cfg, ok := configOptions(d, "cluster_config.0.autoscaling_config"); ok { + conf.AutoscalingConfig = expandAutoscalingConfig(cfg) + } + if cfg, ok := configOptions(d, "cluster_config.0.master_config"); ok { log.Println("[INFO] got master_config") conf.MasterConfig = expandInstanceGroupConfig(cfg) @@ -718,6 +737,14 @@ func expandEncryptionConfig(cfg map[string]interface{}) *dataproc.EncryptionConf return conf } +func expandAutoscalingConfig(cfg map[string]interface{}) *dataproc.AutoscalingConfig { + conf := &dataproc.AutoscalingConfig{} + if v, ok := cfg["policy_uri"]; ok { + conf.PolicyUri = v.(string) + } + return conf +} + func expandInitializationActions(v interface{}) []*dataproc.NodeInitializationAction { actionList := v.([]interface{}) @@ -927,6 +954,7 @@ func flattenClusterConfig(d *schema.ResourceData, cfg *dataproc.ClusterConfig) ( "worker_config": flattenInstanceGroupConfig(d, cfg.WorkerConfig), "preemptible_worker_config": flattenPreemptibleInstanceGroupConfig(d, cfg.SecondaryWorkerConfig), "encryption_config": flattenEncryptionConfig(d, cfg.EncryptionConfig), + "autoscaling_config": flattenAutoscalingConfig(d, cfg.AutoscalingConfig), } if len(cfg.InitializationActions) > 0 { @@ -962,6 +990,18 @@ func flattenEncryptionConfig(d *schema.ResourceData, ec *dataproc.EncryptionConf return []map[string]interface{}{data} } +func flattenAutoscalingConfig(d *schema.ResourceData, ec *dataproc.AutoscalingConfig) []map[string]interface{} { + if ec == nil { + return nil + } + + data := map[string]interface{}{ + "policy_uri": ec.PolicyUri, + } + + return []map[string]interface{}{data} +} + func flattenAccelerators(accelerators []*dataproc.AcceleratorConfig) interface{} { acceleratorsTypeSet := schema.NewSet(schema.HashResource(acceleratorsSchema()), []interface{}{}) for _, accelerator := range accelerators { diff --git a/website/docs/r/dataproc_autoscaling_policy.html.markdown b/website/docs/r/dataproc_autoscaling_policy.html.markdown new file mode 100644 index 00000000000..396efa9738f --- /dev/null +++ b/website/docs/r/dataproc_autoscaling_policy.html.markdown @@ -0,0 +1,245 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud Dataproc" +layout: "google" +page_title: "Google: google_dataproc_autoscaling_policy" +sidebar_current: "docs-google-dataproc-autoscaling-policy" +description: |- + Describes an autoscaling policy for Dataproc cluster autoscaler. +--- + +# google\_dataproc\_autoscaling\_policy + +Describes an autoscaling policy for Dataproc cluster autoscaler. 
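This change also adds an `autoscaling_config` block, with a single required `policy_uri` field, to `google_dataproc_cluster`; that block is how a cluster opts in to a policy. As an illustrative sketch of the case where the policy is not managed in the same configuration — the project, region, and policy id below are placeholders, and the resource-name form simply mirrors this resource's `name`/import id format shown later on this page — `policy_uri` can point at a policy that already exists:

```hcl
# Illustrative only: attach a pre-existing autoscaling policy to a cluster.
# The project, region, and policy id are placeholders, and the policy is
# assumed to live in the same project and region as the cluster.
resource "google_dataproc_cluster" "with_existing_policy" {
  name   = "autoscaled-cluster"
  region = "us-central1"

  cluster_config {
    autoscaling_config {
      # Full resource name of a policy created outside this configuration,
      # in the same form as this resource's `name` attribute.
      policy_uri = "projects/my-project/locations/us-central1/autoscalingPolicies/prod-policy"
    }
  }
}
```

The example below shows the equivalent setup where the policy itself is managed alongside the cluster.
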
+ + + + +## Example Usage - Dataproc Autoscaling Policy + + +```hcl +resource "google_dataproc_cluster" "basic" { + name = "tf-dataproc-test-" + region = "us-central1" + + cluster_config { + autoscaling_config { + policy_uri = google_dataproc_autoscaling_policy.asp.name + } + } +} + +resource "google_dataproc_autoscaling_policy" "asp" { + policy_id = "tf-dataproc-test-" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `policy_id` - + (Required) + The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), + and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between + 3 and 50 characters. + + +- - - + + +* `worker_config` - + (Optional) + Describes how the autoscaler will operate for primary workers. Structure is documented below. + +* `secondary_worker_config` - + (Optional) + Describes how the autoscaler will operate for secondary workers. Structure is documented below. + +* `basic_algorithm` - + (Optional) + Basic algorithm for autoscaling. Structure is documented below. + +* `location` - + (Optional) + The location where the autoscaling poicy should reside. + The default value is `global`. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +The `worker_config` block supports: + +* `min_instances` - + (Optional) + Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2. + +* `max_instances` - + (Required) + Maximum number of instances for this group. + +* `weight` - + (Optional) + Weight for the instance group, which is used to determine the fraction of total workers + in the cluster from this instance group. For example, if primary workers have weight 2, + and secondary workers have weight 1, the cluster will have approximately 2 primary workers + for each secondary worker. + The cluster may not reach the specified balance if constrained by min/max bounds or other + autoscaling settings. For example, if maxInstances for secondary workers is 0, then only + primary workers will be added. The cluster can also be out of balance when created. + If weight is not set on any instance group, the cluster will default to equal weight for + all groups: the cluster will attempt to maintain an equal number of workers in each group + within the configured size bounds for each group. If weight is set for one group only, + the cluster will default to zero weight on the unset group. For example if weight is set + only on primary workers, the cluster will use primary workers only and no secondary workers. + +The `secondary_worker_config` block supports: + +* `min_instances` - + (Optional) + Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0. + +* `max_instances` - + (Optional) + Maximum number of instances for this group. Note that by default, clusters will not use + secondary workers. Required for secondary workers if the minimum secondary instances is set. + Bounds: [minInstances, ). Defaults to 0. + +* `weight` - + (Optional) + Weight for the instance group, which is used to determine the fraction of total workers + in the cluster from this instance group. 
For example, if primary workers have weight 2, + and secondary workers have weight 1, the cluster will have approximately 2 primary workers + for each secondary worker. + The cluster may not reach the specified balance if constrained by min/max bounds or other + autoscaling settings. For example, if maxInstances for secondary workers is 0, then only + primary workers will be added. The cluster can also be out of balance when created. + If weight is not set on any instance group, the cluster will default to equal weight for + all groups: the cluster will attempt to maintain an equal number of workers in each group + within the configured size bounds for each group. If weight is set for one group only, + the cluster will default to zero weight on the unset group. For example if weight is set + only on primary workers, the cluster will use primary workers only and no secondary workers. + +The `basic_algorithm` block supports: + +* `cooldown_period` - + (Optional) + Duration between scaling events. A scaling period starts after the + update operation from the previous event has completed. + Bounds: [2m, 1d]. Default: 2m. + +* `yarn_config` - + (Required) + YARN autoscaling configuration. Structure is documented below. + + +The `yarn_config` block supports: + +* `graceful_decommission_timeout` - + (Required) + Timeout for YARN graceful decommissioning of Node Managers. Specifies the + duration to wait for jobs to complete before forcefully removing workers + (and potentially interrupting jobs). Only applicable to downscaling operations. + Bounds: [0s, 1d]. + +* `scale_up_factor` - + (Required) + Fraction of average pending memory in the last cooldown period for which to + add workers. A scale-up factor of 1.0 will result in scaling up so that there + is no pending memory remaining after the update (more aggressive scaling). + A scale-up factor closer to 0 will result in a smaller magnitude of scaling up + (less aggressive scaling). + Bounds: [0.0, 1.0]. + +* `scale_down_factor` - + (Required) + Fraction of average pending memory in the last cooldown period for which to + remove workers. A scale-down factor of 1 will result in scaling down so that there + is no available memory remaining after the update (more aggressive scaling). + A scale-down factor of 0 disables removing workers, which can be beneficial for + autoscaling a single job. + Bounds: [0.0, 1.0]. + +* `scale_up_min_worker_fraction` - + (Optional) + Minimum scale-up threshold as a fraction of total cluster size before scaling + occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of + 0 means the autoscaler will scale up on any recommended change. + Bounds: [0.0, 1.0]. Default: 0.0. + +* `scale_down_min_worker_fraction` - + (Optional) + Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. + For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must + recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 + means the autoscaler will scale down on any recommended change. + Bounds: [0.0, 1.0]. Default: 0.0. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + + +* `name` - + The "resource name" of the autoscaling policy. 
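Putting the arguments above together, the following is an illustrative sketch only — the policy id, location, instance counts, weights, thresholds, and durations are placeholder values chosen to exercise the documented fields, not recommended settings. It also exports the computed `name`, which is the value a cluster's `autoscaling_config.policy_uri` expects.

```hcl
# Illustrative values only; every setting below is a placeholder that simply
# exercises the documented fields and stays within the documented bounds.
resource "google_dataproc_autoscaling_policy" "example" {
  policy_id = "example-policy"
  location  = "us-central1"

  worker_config {
    min_instances = 2
    max_instances = 10
    weight        = 1
  }

  secondary_worker_config {
    min_instances = 0
    max_instances = 20
    weight        = 1
  }

  basic_algorithm {
    # 120s is the documented lower bound (2m) and default.
    cooldown_period = "120s"

    yarn_config {
      graceful_decommission_timeout  = "300s"
      scale_up_factor                = 0.5
      scale_down_factor              = 0.5
      scale_up_min_worker_fraction   = 0.1
      scale_down_min_worker_fraction = 0.1
    }
  }
}

# The computed full resource name, suitable for a cluster's
# autoscaling_config.policy_uri.
output "example_policy_name" {
  value = google_dataproc_autoscaling_policy.example.name
}
```
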
+ + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 4 minutes. +- `update` - Default is 4 minutes. +- `delete` - Default is 4 minutes. + +## Import + +AutoscalingPolicy can be imported using any of these accepted formats: + +``` +$ terraform import google_dataproc_autoscaling_policy.default projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}} +$ terraform import google_dataproc_autoscaling_policy.default {{project}}/{{location}}/{{policy_id}} +$ terraform import google_dataproc_autoscaling_policy.default {{location}}/{{policy_id}} +``` + +-> If you're importing a resource with beta features, make sure to include `-provider=google-beta` +as an argument so that Terraform uses the correct provider to import your resource. + +## User Project Overrides + +This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override). diff --git a/website/google.erb b/website/google.erb index 73e7bb71e72..3abd17167f4 100644 --- a/website/google.erb +++ b/website/google.erb @@ -735,6 +735,10 @@ > Google Dataproc Resources
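Returning to the import formats documented above for `google_dataproc_autoscaling_policy`: the id segments must line up with the configuration that will manage the resource after import. As a hedged sketch using an entirely hypothetical id, a policy imported with `terraform import google_dataproc_autoscaling_policy.default projects/my-project/locations/us-central1/autoscalingPolicies/prod-policy` would pair with a block like:

```hcl
# Placeholder values matching the hypothetical import id
# projects/my-project/locations/us-central1/autoscalingPolicies/prod-policy.
resource "google_dataproc_autoscaling_policy" "default" {
  policy_id = "prod-policy"
  location  = "us-central1"
  project   = "my-project"

  # Add worker_config, secondary_worker_config, and basic_algorithm here to
  # match the imported policy's actual settings and avoid a diff on the next plan.
}
```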