diff --git a/.changelog/12378.txt b/.changelog/12378.txt
new file mode 100644
index 00000000000..3511ea23c7d
--- /dev/null
+++ b/.changelog/12378.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+dataproc: ensured the oneOf condition is honored when expanding the job configuration for Hive, Pig, Spark SQL, and Presto in `google_dataproc_job`.
+```
\ No newline at end of file
diff --git a/google/services/dataproc/resource_dataproc_job.go b/google/services/dataproc/resource_dataproc_job.go
index eb5c44518a6..ce3fec13bfd 100644
--- a/google/services/dataproc/resource_dataproc_job.go
+++ b/google/services/dataproc/resource_dataproc_job.go
@@ -931,11 +931,17 @@ func flattenHiveJob(job *dataproc.HiveJob) []map[string]interface{} {
 func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob {
 	job := &dataproc.HiveJob{}
 	if v, ok := config["query_file_uri"]; ok {
-		job.QueryFileUri = v.(string)
+		queryFileUri := v.(string)
+		if len(queryFileUri) != 0 {
+			job.QueryFileUri = queryFileUri
+		}
 	}
 	if v, ok := config["query_list"]; ok {
-		job.QueryList = &dataproc.QueryList{
-			Queries: tpgresource.ConvertStringArr(v.([]interface{})),
+		queryList := v.([]interface{})
+		if len(queryList) != 0 {
+			job.QueryList = &dataproc.QueryList{
+				Queries: tpgresource.ConvertStringArr(queryList),
+			}
 		}
 	}
 	if v, ok := config["continue_on_failure"]; ok {
@@ -1039,11 +1045,17 @@ func flattenPigJob(job *dataproc.PigJob) []map[string]interface{} {
 func expandPigJob(config map[string]interface{}) *dataproc.PigJob {
 	job := &dataproc.PigJob{}
 	if v, ok := config["query_file_uri"]; ok {
-		job.QueryFileUri = v.(string)
+		queryFileUri := v.(string)
+		if len(queryFileUri) != 0 {
+			job.QueryFileUri = queryFileUri
+		}
 	}
 	if v, ok := config["query_list"]; ok {
-		job.QueryList = &dataproc.QueryList{
-			Queries: tpgresource.ConvertStringArr(v.([]interface{})),
+		queryList := v.([]interface{})
+		if len(queryList) != 0 {
+			job.QueryList = &dataproc.QueryList{
+				Queries: tpgresource.ConvertStringArr(queryList),
+			}
 		}
 	}
 	if v, ok := config["continue_on_failure"]; ok {
@@ -1140,11 +1152,17 @@ func flattenSparkSqlJob(job *dataproc.SparkSqlJob) []map[string]interface{} {
 func expandSparkSqlJob(config map[string]interface{}) *dataproc.SparkSqlJob {
 	job := &dataproc.SparkSqlJob{}
 	if v, ok := config["query_file_uri"]; ok {
-		job.QueryFileUri = v.(string)
+		queryFileUri := v.(string)
+		if len(queryFileUri) != 0 {
+			job.QueryFileUri = queryFileUri
+		}
 	}
 	if v, ok := config["query_list"]; ok {
-		job.QueryList = &dataproc.QueryList{
-			Queries: tpgresource.ConvertStringArr(v.([]interface{})),
+		queryList := v.([]interface{})
+		if len(queryList) != 0 {
+			job.QueryList = &dataproc.QueryList{
+				Queries: tpgresource.ConvertStringArr(queryList),
+			}
 		}
 	}
 	if v, ok := config["script_variables"]; ok {
@@ -1241,20 +1259,26 @@ func flattenPrestoJob(job *dataproc.PrestoJob) []map[string]interface{} {
 
 func expandPrestoJob(config map[string]interface{}) *dataproc.PrestoJob {
 	job := &dataproc.PrestoJob{}
+	if v, ok := config["query_file_uri"]; ok {
+		queryFileUri := v.(string)
+		if len(queryFileUri) != 0 {
+			job.QueryFileUri = queryFileUri
+		}
+	}
+	if v, ok := config["query_list"]; ok {
+		queryList := v.([]interface{})
+		if len(queryList) != 0 {
+			job.QueryList = &dataproc.QueryList{
+				Queries: tpgresource.ConvertStringArr(queryList),
+			}
+		}
+	}
 	if v, ok := config["client_tags"]; ok {
 		job.ClientTags = tpgresource.ConvertStringArr(v.([]interface{}))
 	}
 	if v, ok := config["continue_on_failure"]; ok {
 		job.ContinueOnFailure = v.(bool)
 	}
-	if v, ok := config["query_file_uri"]; ok {
-		job.QueryFileUri = v.(string)
-	}
-	if v, ok := config["query_list"]; ok {
-		job.QueryList = &dataproc.QueryList{
-			Queries: tpgresource.ConvertStringArr(v.([]interface{})),
-		}
-	}
 	if v, ok := config["properties"]; ok {
 		job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{}))
 	}
diff --git a/google/services/dataproc/resource_dataproc_job_test.go b/google/services/dataproc/resource_dataproc_job_test.go
index 0bae84ed1c3..b999560fe62 100644
--- a/google/services/dataproc/resource_dataproc_job_test.go
+++ b/google/services/dataproc/resource_dataproc_job_test.go
@@ -265,14 +265,10 @@ func TestAccDataprocJob_Pig(t *testing.T) {
 	})
 }
 
-func TestAccDataprocJob_SparkSql(t *testing.T) {
+func testAccDataprocJobSparkSql(t *testing.T, config string) {
 	t.Parallel()
 
 	var job dataproc.Job
-	rnd := acctest.RandString(t, 10)
-	networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster")
-	subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName)
-	acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName)
 
 	acctest.VcrTest(t, resource.TestCase{
 		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
@@ -280,7 +276,7 @@
 		CheckDestroy:             testAccCheckDataprocJobDestroyProducer(t),
 		Steps: []resource.TestStep{
 			{
-				Config: testAccDataprocJob_sparksql(rnd, subnetworkName),
+				Config: config,
 
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckDataprocJobExists(t, "google_dataproc_job.sparksql", &job),
@@ -301,6 +297,22 @@
 	})
 }
 
+func TestAccDataprocJob_SparkSql_QueryList(t *testing.T) {
+	rnd := acctest.RandString(t, 10)
+	networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName)
+	acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName)
+	testAccDataprocJobSparkSql(t, testAccDataprocJob_SparkSql_QueryList(rnd, subnetworkName))
+}
+
+func TestAccDataprocJob_SparkSql_QueryFile(t *testing.T) {
+	rnd := acctest.RandString(t, 10)
+	networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster")
+	subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName)
+	acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName)
+	testAccDataprocJobSparkSql(t, testAccDataprocJob_SparkSql_QueryFile(rnd, subnetworkName))
+}
+
 func TestAccDataprocJob_Presto(t *testing.T) {
 	t.Parallel()
 
@@ -833,7 +845,7 @@ resource "google_dataproc_job" "pig" {
 
 }
 
-func testAccDataprocJob_sparksql(rnd, subnetworkName string) string {
+func testAccDataprocJob_SparkSql_QueryList(rnd, subnetworkName string) string {
 	return fmt.Sprintf(
 		singleNodeClusterConfig+`
 resource "google_dataproc_job" "sparksql" {
@@ -855,6 +867,23 @@
 
 }
 
+func testAccDataprocJob_SparkSql_QueryFile(rnd, subnetworkName string) string {
+	return fmt.Sprintf(
+		singleNodeClusterConfig+`
+resource "google_dataproc_job" "sparksql" {
+  region       = google_dataproc_cluster.basic.region
+  force_delete = true
+  placement {
+    cluster_name = google_dataproc_cluster.basic.name
+  }
+
+  sparksql_config {
+    query_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/spark-sql/natality/cigarette_correlations.sql"
+  }
+}
+`, rnd, subnetworkName)
+}
+
 func testAccDataprocJob_presto(rnd, subnetworkName string) string {
 	return fmt.Sprintf(`
 resource "google_dataproc_cluster" "basic" {
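Why the fix works: Terraform's schema hands the expand functions a config map in which unset attributes arrive as zero values (an empty string for `query_file_uri`, an empty list for `query_list`), so the old code always built a non-nil `QueryList` and always assigned `QueryFileUri`, populating both members of the Dataproc API's oneOf union. The sketch below is a minimal, self-contained illustration of the guard pattern, not provider code: the `HiveJob` and `QueryList` structs are local stand-ins for the real `google.golang.org/api/dataproc/v1` types, and the `gs://` URI is a hypothetical placeholder.

```go
package main

import "fmt"

// Local stand-ins for the dataproc API types used by the real expand functions.
type QueryList struct{ Queries []string }

type HiveJob struct {
	QueryFileUri string
	QueryList    *QueryList
}

// expandHiveJob mirrors the fixed logic: a oneOf member is only set when its
// config value is non-empty, so at most one side of the union is populated.
func expandHiveJob(config map[string]interface{}) *HiveJob {
	job := &HiveJob{}
	if v, ok := config["query_file_uri"]; ok {
		if queryFileUri := v.(string); len(queryFileUri) != 0 {
			job.QueryFileUri = queryFileUri
		}
	}
	if v, ok := config["query_list"]; ok {
		if queryList := v.([]interface{}); len(queryList) != 0 {
			queries := make([]string, len(queryList))
			for i, q := range queryList {
				queries[i] = q.(string)
			}
			job.QueryList = &QueryList{Queries: queries}
		}
	}
	return job
}

func main() {
	// Only query_file_uri is configured; query_list arrives as an empty list.
	// Before the fix this still produced a non-nil (empty) QueryList, which
	// violated the oneOf condition; now it stays nil and is omitted entirely.
	job := expandHiveJob(map[string]interface{}{
		"query_file_uri": "gs://my-bucket/queries/hive.sql", // hypothetical URI
		"query_list":     []interface{}{},
	})
	fmt.Printf("QueryFileUri=%q QueryList=%v\n", job.QueryFileUri, job.QueryList)
	// Output: QueryFileUri="gs://my-bucket/queries/hive.sql" QueryList=<nil>
}
```

The same guard is applied verbatim in the Pig, Spark SQL, and Presto expanders above, which is why the two new acceptance tests exercise the query-list and query-file variants separately.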