From 29f5cbcfc6a4876480e5eb93da1379334e7fa548 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Filip=20Popi=C4=87?=
Date: Thu, 25 Jun 2020 21:33:06 +0200
Subject: [PATCH] Add support for BigQuery hourly time partitioning (#6675)

* Add TODO for checking the go library for HOUR support, update description and validation value.

* Remove TODO from code and run gofmt.

* Test table creation with HOUR instead of DAY.

* Update google/resource_bigquery_table.go

* Add testAccBigQueryTableHourlyTimePartitioning test.
---
 google/resource_bigquery_table.go      |  8 +--
 google/resource_bigquery_table_test.go | 92 +++++++++++++++++++++++++-
 2 files changed, 94 insertions(+), 6 deletions(-)

diff --git a/google/resource_bigquery_table.go b/google/resource_bigquery_table.go
index 7e723671616..01e92e4e85c 100644
--- a/google/resource_bigquery_table.go
+++ b/google/resource_bigquery_table.go
@@ -336,13 +336,13 @@ func resourceBigQueryTable() *schema.Resource {
 				Description: `Number of milliseconds for which to keep the storage for a partition.`,
 			},
 
-			// Type: [Required] The only type supported is DAY, which will generate
-			// one partition per day based on data loading time.
+			// Type: [Required] The supported types are DAY and HOUR, which will generate
+			// one partition per day or hour based on data loading time.
 			"type": {
 				Type:         schema.TypeString,
 				Required:     true,
-				Description:  `The only type supported is DAY, which will generate one partition per day based on data loading time.`,
-				ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
+				Description:  `The supported types are DAY and HOUR, which will generate one partition per day or hour based on data loading time.`,
+				ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR"}, false),
 			},
 
 			// Field: [Optional] The field used to determine how to create a time-based
diff --git a/google/resource_bigquery_table_test.go b/google/resource_bigquery_table_test.go
index eb665179e22..5a6228d3f3b 100644
--- a/google/resource_bigquery_table_test.go
+++ b/google/resource_bigquery_table_test.go
@@ -20,7 +20,7 @@ func TestAccBigQueryTable_Basic(t *testing.T) {
 		CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
 		Steps: []resource.TestStep{
 			{
-				Config: testAccBigQueryTable(datasetID, tableID),
+				Config: testAccBigQueryTableDailyTimePartitioning(datasetID, tableID),
 			},
 			{
 				ResourceName:      "google_bigquery_table.test",
@@ -64,6 +64,37 @@ func TestAccBigQueryTable_Kms(t *testing.T) {
 	})
 }
 
+func TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) {
+	t.Parallel()
+
+	datasetID := fmt.Sprintf("tf_test_%s", randString(t, 10))
+	tableID := fmt.Sprintf("tf_test_%s", randString(t, 10))
+
+	vcrTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID),
+			},
+			{
+				ResourceName:      "google_bigquery_table.test",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+			{
+				Config: testAccBigQueryTableUpdated(datasetID, tableID),
+			},
+			{
+				ResourceName:      "google_bigquery_table.test",
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
 func TestAccBigQueryTable_HivePartitioning(t *testing.T) {
 	t.Parallel()
 	bucketName := testBucketName(t)
@@ -261,7 +292,7 @@ func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.St
 	}
 }
 
-func testAccBigQueryTable(datasetID, tableID string) string {
+func testAccBigQueryTableDailyTimePartitioning(datasetID, tableID string) string {
 	return fmt.Sprintf(`
 resource "google_bigquery_dataset" "test" {
   dataset_id = "%s"
@@ -318,6 +349,63 @@ EOH
 `, datasetID, tableID)
 }
 
+func testAccBigQueryTableHourlyTimePartitioning(datasetID, tableID string) string {
+	return fmt.Sprintf(`
+resource "google_bigquery_dataset" "test" {
+  dataset_id = "%s"
+}
+
+resource "google_bigquery_table" "test" {
+  table_id   = "%s"
+  dataset_id = google_bigquery_dataset.test.dataset_id
+
+  time_partitioning {
+    type                     = "HOUR"
+    field                    = "ts"
+    require_partition_filter = true
+  }
+  clustering = ["some_int", "some_string"]
+  schema     = <<EOH
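(The rest of the hourly test fixture's schema heredoc is truncated above.)

For context, a minimal HCL sketch of the configuration this patch enables, modelled on the new acceptance-test fixture: the resource names, dataset/table IDs, and schema JSON below are illustrative assumptions, not taken verbatim from the truncated fixture; only the `time_partitioning` settings (`type = "HOUR"`, `field = "ts"`) come from the patch itself.

resource "google_bigquery_dataset" "example" {
  dataset_id = "example_dataset"
}

resource "google_bigquery_table" "example" {
  dataset_id = google_bigquery_dataset.example.dataset_id
  table_id   = "example_table"

  # With this patch applied, HOUR is accepted in addition to DAY.
  time_partitioning {
    type  = "HOUR"
    field = "ts"
  }

  # Illustrative schema: the column named in "field" must exist, here as a TIMESTAMP.
  schema = <<EOF
[
  {
    "name": "ts",
    "type": "TIMESTAMP",
    "mode": "NULLABLE"
  }
]
EOF
}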