From 87d0dab7153db627d574c4b01971b120839c70e7 Mon Sep 17 00:00:00 2001
From: The Magician <magic-modules@google.com>
Date: Mon, 4 Dec 2023 18:26:07 -0800
Subject: [PATCH] bigquery connection - spark connection type (#7498) (#16677)

[upstream:2717aaf1a1716a081efbd63fd6d46fb8260e97d1]

Signed-off-by: Modular Magician <magic-modules@google.com>
---
 .changelog/7498.txt                           |   3 +
 .../resource_bigquery_connection.go           | 213 +++++++++++++++++-
 ...urce_bigquery_connection_generated_test.go |  63 ++++++
 .../docs/r/bigquery_connection.html.markdown  |  77 +++++++
 4 files changed, 351 insertions(+), 5 deletions(-)
 create mode 100644 .changelog/7498.txt

diff --git a/.changelog/7498.txt b/.changelog/7498.txt
new file mode 100644
index 00000000000..81e0312657e
--- /dev/null
+++ b/.changelog/7498.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+bigqueryconnection - add `spark` support
+```
diff --git a/google/services/bigqueryconnection/resource_bigquery_connection.go b/google/services/bigqueryconnection/resource_bigquery_connection.go
index d9322916f94..ff3f17a99a3 100644
--- a/google/services/bigqueryconnection/resource_bigquery_connection.go
+++ b/google/services/bigqueryconnection/resource_bigquery_connection.go
@@ -83,7 +83,7 @@ func ResourceBigqueryConnectionConnection() *schema.Resource {
 						},
 					},
 				},
-				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"},
+				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource", "spark"},
 			},
 			"azure": {
 				Type:     schema.TypeList,
@@ -129,7 +129,7 @@ func ResourceBigqueryConnectionConnection() *schema.Resource {
 						},
 					},
 				},
-				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"},
+				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource", "spark"},
 			},
 			"cloud_resource": {
 				Type:     schema.TypeList,
@@ -145,7 +145,7 @@ func ResourceBigqueryConnectionConnection() *schema.Resource {
 						},
 					},
 				},
-				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"},
+				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource", "spark"},
 			},
 			"cloud_spanner": {
 				Type:     schema.TypeList,
@@ -190,7 +190,7 @@ func ResourceBigqueryConnectionConnection() *schema.Resource {
 						},
 					},
 				},
-				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"},
+				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource", "spark"},
 			},
 			"cloud_sql": {
 				Type:     schema.TypeList,
@@ -243,7 +243,7 @@ func ResourceBigqueryConnectionConnection() *schema.Resource {
 						},
 					},
 				},
-				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource"},
+				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource", "spark"},
 			},
 			"connection_id": {
 				Type:     schema.TypeString,
@@ -274,6 +274,52 @@ Spanner Connections same as spanner region
 AWS allowed regions are aws-us-east-1
 Azure allowed regions are azure-eastus2`,
 			},
+			"spark": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: `Container for connection properties to execute stored procedures for Apache Spark.`,
+				MaxItems:    1,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"metastore_service_config": {
+							Type:        schema.TypeList,
+							Optional:    true,
+							Description: `Dataproc Metastore Service configuration for the connection.`,
+							MaxItems:    1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"metastore_service": {
+										Type:        schema.TypeString,
+										Optional:    true,
+										Description: `Resource name of an existing Dataproc Metastore service in the form of projects/[projectId]/locations/[region]/services/[serviceId].`,
+									},
+								},
+							},
+						},
+						"spark_history_server_config": {
+							Type:        schema.TypeList,
+							Optional:    true,
+							Description: `Spark History Server configuration for the connection.`,
+							MaxItems:    1,
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"dataproc_cluster": {
+										Type:        schema.TypeString,
+										Optional:    true,
+										Description: `Resource name of an existing Dataproc Cluster to act as a Spark History Server for the connection, in the form of projects/[projectId]/regions/[region]/clusters/[cluster_name].`,
+									},
+								},
+							},
+						},
+						"service_account_id": {
+							Type:        schema.TypeString,
+							Computed:    true,
+							Description: `The account ID of the service created for the purpose of this connection.`,
+						},
+					},
+				},
+				ExactlyOneOf: []string{"cloud_sql", "aws", "azure", "cloud_spanner", "cloud_resource", "spark"},
+			},
 			"has_credential": {
 				Type:     schema.TypeBool,
 				Computed: true,
@@ -352,6 +398,12 @@ func resourceBigqueryConnectionConnectionCreate(d *schema.ResourceData, meta int
 	} else if v, ok := d.GetOkExists("cloud_resource"); ok || !reflect.DeepEqual(v, cloudResourceProp) {
 		obj["cloudResource"] = cloudResourceProp
 	}
+	sparkProp, err := expandBigqueryConnectionConnectionSpark(d.Get("spark"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("spark"); ok || !reflect.DeepEqual(v, sparkProp) {
+		obj["spark"] = sparkProp
+	}
 
 	obj, err = resourceBigqueryConnectionConnectionEncoder(d, meta, obj)
 	if err != nil {
@@ -490,6 +542,9 @@ func resourceBigqueryConnectionConnectionRead(d *schema.ResourceData, meta inter
 	if err := d.Set("cloud_resource", flattenBigqueryConnectionConnectionCloudResource(res["cloudResource"], d, config)); err != nil {
 		return fmt.Errorf("Error reading Connection: %s", err)
 	}
+	if err := d.Set("spark", flattenBigqueryConnectionConnectionSpark(res["spark"], d, config)); err != nil {
+		return fmt.Errorf("Error reading Connection: %s", err)
+	}
 
 	return nil
 }
@@ -552,6 +607,12 @@ func resourceBigqueryConnectionConnectionUpdate(d *schema.ResourceData, meta int
 	} else if v, ok := d.GetOkExists("cloud_resource"); ok || !reflect.DeepEqual(v, cloudResourceProp) {
 		obj["cloudResource"] = cloudResourceProp
 	}
+	sparkProp, err := expandBigqueryConnectionConnectionSpark(d.Get("spark"), d, config)
+	if err != nil {
+		return err
+	} else if v, ok := d.GetOkExists("spark"); ok || !reflect.DeepEqual(v, sparkProp) {
+		obj["spark"] = sparkProp
+	}
 
 	obj, err = resourceBigqueryConnectionConnectionEncoder(d, meta, obj)
 	if err != nil {
@@ -594,6 +655,10 @@ func resourceBigqueryConnectionConnectionUpdate(d *schema.ResourceData, meta int
 	if d.HasChange("cloud_resource") {
 		updateMask = append(updateMask, "cloudResource")
 	}
+
+	if d.HasChange("spark") {
+		updateMask = append(updateMask, "spark")
+	}
 	// updateMask is a URL parameter but not present in the schema, so ReplaceVars
 	// won't set it
 	url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")})
@@ -927,6 +992,61 @@ func flattenBigqueryConnectionConnectionCloudResourceServiceAccountId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
 	return v
 }
 
+func flattenBigqueryConnectionConnectionSpark(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["service_account_id"] =
+		flattenBigqueryConnectionConnectionSparkServiceAccountId(original["serviceAccountId"], d, config)
+	transformed["metastore_service_config"] =
+		flattenBigqueryConnectionConnectionSparkMetastoreServiceConfig(original["metastoreServiceConfig"], d, config)
+	transformed["spark_history_server_config"] =
+		flattenBigqueryConnectionConnectionSparkSparkHistoryServerConfig(original["sparkHistoryServerConfig"], d, config)
+	return []interface{}{transformed}
+}
+func flattenBigqueryConnectionConnectionSparkServiceAccountId(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenBigqueryConnectionConnectionSparkMetastoreServiceConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["metastore_service"] =
+		flattenBigqueryConnectionConnectionSparkMetastoreServiceConfigMetastoreService(original["metastoreService"], d, config)
+	return []interface{}{transformed}
+}
+func flattenBigqueryConnectionConnectionSparkMetastoreServiceConfigMetastoreService(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
+func flattenBigqueryConnectionConnectionSparkSparkHistoryServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["dataproc_cluster"] =
+		flattenBigqueryConnectionConnectionSparkSparkHistoryServerConfigDataprocCluster(original["dataprocCluster"], d, config)
+	return []interface{}{transformed}
+}
+func flattenBigqueryConnectionConnectionSparkSparkHistoryServerConfigDataprocCluster(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} {
+	return v
+}
+
 func expandBigqueryConnectionConnectionConnectionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
 	return v, nil
 }
@@ -1279,6 +1399,89 @@ func expandBigqueryConnectionConnectionCloudResourceServiceAccountId(v interface
 	return v, nil
 }
 
+func expandBigqueryConnectionConnectionSpark(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedServiceAccountId, err := expandBigqueryConnectionConnectionSparkServiceAccountId(original["service_account_id"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedServiceAccountId); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["serviceAccountId"] = transformedServiceAccountId
+	}
+
+	transformedMetastoreServiceConfig, err := expandBigqueryConnectionConnectionSparkMetastoreServiceConfig(original["metastore_service_config"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMetastoreServiceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["metastoreServiceConfig"] = transformedMetastoreServiceConfig
+	}
+
+	transformedSparkHistoryServerConfig, err := expandBigqueryConnectionConnectionSparkSparkHistoryServerConfig(original["spark_history_server_config"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedSparkHistoryServerConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["sparkHistoryServerConfig"] = transformedSparkHistoryServerConfig
+	}
+
+	return transformed, nil
+}
+
+func expandBigqueryConnectionConnectionSparkServiceAccountId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandBigqueryConnectionConnectionSparkMetastoreServiceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedMetastoreService, err := expandBigqueryConnectionConnectionSparkMetastoreServiceConfigMetastoreService(original["metastore_service"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedMetastoreService); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["metastoreService"] = transformedMetastoreService
+	}
+
+	return transformed, nil
+}
+
+func expandBigqueryConnectionConnectionSparkMetastoreServiceConfigMetastoreService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+func expandBigqueryConnectionConnectionSparkSparkHistoryServerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	l := v.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil, nil
+	}
+	raw := l[0]
+	original := raw.(map[string]interface{})
+	transformed := make(map[string]interface{})
+
+	transformedDataprocCluster, err := expandBigqueryConnectionConnectionSparkSparkHistoryServerConfigDataprocCluster(original["dataproc_cluster"], d, config)
+	if err != nil {
+		return nil, err
+	} else if val := reflect.ValueOf(transformedDataprocCluster); val.IsValid() && !tpgresource.IsEmptyValue(val) {
+		transformed["dataprocCluster"] = transformedDataprocCluster
+	}
+
+	return transformed, nil
+}
+
+func expandBigqueryConnectionConnectionSparkSparkHistoryServerConfigDataprocCluster(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
 func resourceBigqueryConnectionConnectionEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
 	// connection_id is needed to qualify the URL but cannot be sent in the body
 	delete(obj, "connection_id")
diff --git a/google/services/bigqueryconnection/resource_bigquery_connection_generated_test.go b/google/services/bigqueryconnection/resource_bigquery_connection_generated_test.go
index 0f4c549563c..46d24ddc4d3 100644
--- a/google/services/bigqueryconnection/resource_bigquery_connection_generated_test.go
+++ b/google/services/bigqueryconnection/resource_bigquery_connection_generated_test.go
@@ -405,6 +405,69 @@ resource "google_bigquery_connection" "connection" {
 `, context)
 }
 
+func TestAccBigqueryConnectionConnection_bigqueryConnectionSparkExample(t *testing.T) {
+	t.Parallel()
+
+	context := map[string]interface{}{
+		"random_suffix": acctest.RandString(t, 10),
+	}
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		CheckDestroy:             testAccCheckBigqueryConnectionConnectionDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccBigqueryConnectionConnection_bigqueryConnectionSparkExample(context),
+			},
+			{
+				ResourceName:            "google_bigquery_connection.connection",
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"location"},
+			},
+		},
+	})
+}
+
+func testAccBigqueryConnectionConnection_bigqueryConnectionSparkExample(context map[string]interface{}) string {
+	return acctest.Nprintf(`
+resource "google_bigquery_connection" "connection" {
+  connection_id = "tf-test-my-connection%{random_suffix}"
+  location      = "US"
+  friendly_name = "👋"
+  description   = "a riveting description"
+  spark {
+    spark_history_server_config {
+      dataproc_cluster = google_dataproc_cluster.basic.id
+    }
+  }
+}
+
+resource "google_dataproc_cluster" "basic" {
+  name   = "tf-test-my-connection%{random_suffix}"
+  region = "us-central1"
+
+  cluster_config {
+    # Keep the costs down with smallest config we can get away with
+    software_config {
+      override_properties = {
+        "dataproc:dataproc.allow.zero.workers" = "true"
+      }
+    }
+
+    master_config {
+      num_instances = 1
+      machine_type  = "e2-standard-2"
+      disk_config {
+        boot_disk_size_gb = 35
+      }
+    }
+  }
+}
+`, context)
+}
+
 func testAccCheckBigqueryConnectionConnectionDestroyProducer(t *testing.T) func(s *terraform.State) error {
 	return func(s *terraform.State) error {
 		for name, rs := range s.RootModule().Resources {
diff --git a/website/docs/r/bigquery_connection.html.markdown b/website/docs/r/bigquery_connection.html.markdown
index 3c854945da7..8084f285b56 100644
--- a/website/docs/r/bigquery_connection.html.markdown
+++ b/website/docs/r/bigquery_connection.html.markdown
@@ -235,6 +235,49 @@ resource "google_bigquery_connection" "connection" {
   }
 }
 ```
+
+## Example Usage - Bigquery Connection Spark
+
+
+```hcl
+resource "google_bigquery_connection" "connection" {
+  connection_id = "my-connection"
+  location      = "US"
+  friendly_name = "👋"
+  description   = "a riveting description"
+  spark {
+    spark_history_server_config {
+      dataproc_cluster = google_dataproc_cluster.basic.id
+    }
+  }
+}
+
+resource "google_dataproc_cluster" "basic" {
+  name   = "my-connection"
+  region = "us-central1"
+
+  cluster_config {
+    # Keep the costs down with smallest config we can get away with
+    software_config {
+      override_properties = {
+        "dataproc:dataproc.allow.zero.workers" = "true"
+      }
+    }
+
+    master_config {
+      num_instances = 1
+      machine_type  = "e2-standard-2"
+      disk_config {
+        boot_disk_size_gb = 35
+      }
+    }
+  }
+}
+```
+
 ## Argument Reference
 
 The following arguments are supported:
@@ -292,6 +335,11 @@ The following arguments are supported:
   Container for connection properties for delegation of access to GCP resources.
   Structure is [documented below](#nested_cloud_resource).
 
+* `spark` -
+  (Optional)
+  Container for connection properties to execute stored procedures for Apache Spark.
+  Structure is [documented below](#nested_spark).
+
 * `project` - (Optional) The ID of the project in which the resource
     belongs. If it is not provided, the provider project is used.
 
@@ -414,6 +462,35 @@ The following arguments are supported:
   (Output)
   The account ID of the service created for the purpose of this connection.
 
+<a name="nested_spark"></a>The `spark` block supports:
+
+* `service_account_id` -
+  (Output)
+  The account ID of the service created for the purpose of this connection.
+
+* `metastore_service_config` -
+  (Optional)
+  Dataproc Metastore Service configuration for the connection.
+  Structure is [documented below](#nested_metastore_service_config).
+
+* `spark_history_server_config` -
+  (Optional)
+  Spark History Server configuration for the connection.
+  Structure is [documented below](#nested_spark_history_server_config).
+
+
+<a name="nested_metastore_service_config"></a>The `metastore_service_config` block supports:
+
+* `metastore_service` -
+  (Optional)
+  Resource name of an existing Dataproc Metastore service in the form of projects/[projectId]/locations/[region]/services/[serviceId].
+
+<a name="nested_spark_history_server_config"></a>The `spark_history_server_config` block supports:
+
+* `dataproc_cluster` -
+  (Optional)
+  Resource name of an existing Dataproc Cluster to act as a Spark History Server for the connection, in the form of projects/[projectId]/regions/[region]/clusters/[cluster_name].
+
 ## Attributes Reference
 
 In addition to the arguments listed above, the following computed attributes are exported:
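
Note on using the new `spark` block: the connection exposes a generated service account via `service_account_id`, and stored procedures for Apache Spark can only reach the Dataproc resources referenced in the block once that account has been granted access. A minimal sketch of such a grant follows; the project ID and the `roles/dataproc.viewer` role are illustrative assumptions, not part of this patch — check the BigQuery documentation for the roles your workload actually needs.

```hcl
# Sketch only: grant the connection's generated service account access to
# Dataproc resources in the project. The role and project ID below are
# assumptions; consult the BigQuery "stored procedures for Apache Spark"
# documentation for the exact roles required.
resource "google_project_iam_member" "spark_connection_grant" {
  project = "my-project-id"          # hypothetical project ID
  role    = "roles/dataproc.viewer"  # assumed role; adjust to your workload
  # service_account_id on the spark block is email-formatted, so it can be
  # used directly in an IAM member string.
  member  = "serviceAccount:${google_bigquery_connection.connection.spark[0].service_account_id}"
}
```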