Add BigTable source format in BigQuery tables (#4155) (#8923)
* Add BigTable source format in BigQuery

* Add tests

* Fix test config typos

* Add bigtable external source in the bigquery tests

* typo

* Shrink resource name

* Fix master merge

* Ignore deletion_protection in TestAccBigQueryDataTable_bigtable

Signed-off-by: Modular Magician <[email protected]>
modular-magician authored Apr 15, 2021
1 parent 0cae004 commit 38e2913
Showing 6 changed files with 83 additions and 5 deletions.
3 changes: 3 additions & 0 deletions .changelog/4155.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
bigquery: Added BigTable source format in BigQuery table
```
3 changes: 2 additions & 1 deletion google/resource_big_query_job.go
@@ -516,7 +516,8 @@ row N is just skipped. Otherwise row N is used to extract column names for the detected schema
 ForceNew:    true,
 Description: `The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP".
 For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET".
-For orc, specify "ORC". The default value is CSV.`,
+For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE".
+The default value is CSV.`,
 Default: "CSV",
 },
 "time_partitioning": {
2 changes: 1 addition & 1 deletion google/resource_bigquery_table.go
@@ -389,7 +389,7 @@ func resourceBigQueryTable() *schema.Resource {
 Required:    true,
 Description: `The data format. Supported values are: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC" and "DATASTORE_BACKUP". To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`,
 ValidateFunc: validation.StringInSlice([]string{
-  "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "PARQUET", "ORC",
+  "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE",
 }, false),
 },
 // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud.
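As a side note, not part of the commit itself: a minimal standalone sketch of what the updated validator accepts. `validation.StringInSlice` builds a case-sensitive allow-list check when its second argument is `false`; the import path is an assumption and may differ by plugin-SDK version.

```go
package main

import (
	"fmt"

	// Assumed import path (plugin SDK v2); older provider versions used
	// github.com/hashicorp/terraform-plugin-sdk/helper/validation.
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func main() {
	// Mirrors the updated ValidateFunc: exact, case-sensitive matching.
	f := validation.StringInSlice([]string{
		"CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO",
		"DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE",
	}, false)

	_, errs := f("BIGTABLE", "source_format")
	fmt.Printf("BIGTABLE: %d error(s)\n", len(errs)) // 0 after this change

	_, errs = f("bigtable", "source_format")
	fmt.Printf("bigtable: %d error(s)\n", len(errs)) // 1: lower case is rejected
}
```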
73 changes: 73 additions & 0 deletions google/resource_bigquery_table_test.go
@@ -425,6 +425,32 @@ func TestAccBigQueryExternalDataTable_CSV(t *testing.T) {
})
}

func TestAccBigQueryDataTable_bigtable(t *testing.T) {
    t.Parallel()

    context := map[string]interface{}{
        "random_suffix": randString(t, 8),
        "project":       getTestProjectFromEnv(),
    }

    vcrTest(t, resource.TestCase{
        PreCheck:     func() { testAccPreCheck(t) },
        Providers:    testAccProviders,
        CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t),
        Steps: []resource.TestStep{
            {
                Config: testAccBigQueryTableFromBigtable(context),
            },
            {
                ResourceName:            "google_bigquery_table.table",
                ImportState:             true,
                ImportStateVerify:       true,
                ImportStateVerifyIgnore: []string{"deletion_protection"},
            },
        },
    })
}
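One note on the import step: `deletion_protection` is enforced client-side by the provider rather than returned by the BigQuery API, so a fresh import cannot verify it and the test excludes it via `ImportStateVerifyIgnore`.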

func TestAccBigQueryDataTable_sheet(t *testing.T) {
t.Parallel()

@@ -1406,6 +1432,53 @@ resource "google_bigquery_table" "test" {
`, datasetID, bucketName, objectName, content, tableID, format, quoteChar)
}

func testAccBigQueryTableFromBigtable(context map[string]interface{}) string {
    return Nprintf(`
resource "google_bigtable_instance" "instance" {
  name = "tf-test-bigtable-inst-%{random_suffix}"
  cluster {
    cluster_id = "tf-test-bigtable-%{random_suffix}"
    zone       = "us-central1-b"
  }
  instance_type       = "DEVELOPMENT"
  deletion_protection = false
}
resource "google_bigtable_table" "table" {
  name          = "%{random_suffix}"
  instance_name = google_bigtable_instance.instance.name
  column_family {
    family = "cf-%{random_suffix}-first"
  }
  column_family {
    family = "cf-%{random_suffix}-second"
  }
}
resource "google_bigquery_table" "table" {
  deletion_protection = false
  dataset_id          = google_bigquery_dataset.dataset.dataset_id
  table_id            = "tf_test_bigtable_%{random_suffix}"
  external_data_configuration {
    autodetect            = true
    source_format         = "BIGTABLE"
    ignore_unknown_values = true
    source_uris = [
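      # google_bigtable_table.table.id resolves to
      # "projects/{project}/instances/{instance}/tables/{name}", which yields
      # the fully qualified Bigtable URI that BigQuery expects here.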
"https://googleapis.com/bigtable/${google_bigtable_table.table.id}",
]
}
}
resource "google_bigquery_dataset" "dataset" {
dataset_id = "tf_test_ds_%{random_suffix}"
friendly_name = "test"
description = "This is a test description"
location = "EU"
default_table_expiration_ms = 3600000
labels = {
env = "default"
}
}
`, context)
}
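The fixture relies on the provider's `Nprintf` test helper to render the `%{key}` tokens. A rough standalone sketch of that substitution, assumed behavior rather than the provider's exact implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// nprintfSketch mimics the provider's Nprintf test helper: every %{key}
// token in the template is replaced by the matching context value.
func nprintfSketch(format string, params map[string]interface{}) string {
	for key, val := range params {
		format = strings.ReplaceAll(format, "%{"+key+"}", fmt.Sprintf("%v", val))
	}
	return format
}

func main() {
	out := nprintfSketch(`name = "tf-test-bigtable-inst-%{random_suffix}"`,
		map[string]interface{}{"random_suffix": "abc12345"})
	fmt.Println(out) // name = "tf-test-bigtable-inst-abc12345"
}
```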

func testAccBigQueryTableFromSheet(context map[string]interface{}) string {
return Nprintf(`
resource "google_bigquery_table" "table" {
3 changes: 2 additions & 1 deletion website/docs/r/bigquery_job.html.markdown
@@ -644,7 +644,8 @@ The `load` block supports:
 (Optional)
 The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP".
 For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET".
-For orc, specify "ORC". The default value is CSV.
+For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE".
+The default value is CSV.

* `allow_jagged_rows` -
(Optional)
4 changes: 2 additions & 2 deletions website/docs/r/bigquery_table.html.markdown
@@ -186,8 +186,8 @@ The `external_data_configuration` block supports:
`google_bigquery_table.schema`

 * `source_format` (Required) - The data format. Supported values are:
-  "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC"
-  and "DATASTORE_BACKUP". To use "GOOGLE_SHEETS"
+  "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC",
+  "DATASTORE_BACKUP", and "BIGTABLE". To use "GOOGLE_SHEETS"
   the `scopes` must include
   "https://www.googleapis.com/auth/drive.readonly".

