Add require_partition_filter to BigQuery table with hive_partitioning (#8775)

* Add require_partition_filter

* Fix flattenHivePartitioningOptions func

* Add description into docs
Alvaro Ferreira authored Mar 31, 2021
1 parent b46b674 commit 1fdfc23
Showing 3 changed files with 22 additions and 0 deletions.
16 changes: 16 additions & 0 deletions google/resource_bigquery_table.go
@@ -533,6 +533,14 @@ func resourceBigQueryTable() *schema.Resource {
Optional: true,
Description: `When set, what mode of hive partitioning to use when reading data.`,
},
// RequirePartitionFilter: [Optional] If set to true, queries over this table
// require a partition filter that can be used for partition elimination to be
// specified.
"require_partition_filter": {
Type: schema.TypeBool,
Optional: true,
Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`,
},
// SourceUriPrefix: [Optional] [Experimental] When hive partition detection is requested, a common prefix for all source uris is required.
// The prefix must end immediately before the partition key encoding begins.
"source_uri_prefix": {
@@ -1410,6 +1418,10 @@ func expandHivePartitioningOptions(configured interface{}) *bigquery.HivePartitioningOptions {
opts.Mode = v.(string)
}

if v, ok := raw["require_partition_filter"]; ok {
opts.RequirePartitionFilter = v.(bool)
}

if v, ok := raw["source_uri_prefix"]; ok {
opts.SourceUriPrefix = v.(string)
}
@@ -1424,6 +1436,10 @@ func flattenHivePartitioningOptions(opts *bigquery.HivePartitioningOptions) []map[string]interface{} {
result["mode"] = opts.Mode
}

if opts.RequirePartitionFilter {
result["require_partition_filter"] = opts.RequirePartitionFilter
}

if opts.SourceUriPrefix != "" {
result["source_uri_prefix"] = opts.SourceUriPrefix
}
2 changes: 2 additions & 0 deletions google/resource_bigquery_table_test.go
@@ -1006,6 +1006,7 @@ resource "google_bigquery_table" "test" {
hive_partitioning_options {
mode = "AUTO"
source_uri_prefix = "gs://${google_storage_bucket.test.name}/"
require_partition_filter = true
}
}
@@ -1044,6 +1045,7 @@ resource "google_bigquery_table" "test" {
hive_partitioning_options {
mode = "CUSTOM"
source_uri_prefix = "gs://${google_storage_bucket.test.name}/{key1:STRING}"
require_partition_filter = true
}
schema = <<EOH
4 changes: 4 additions & 0 deletions website/docs/r/bigquery_table.html.markdown
@@ -240,6 +240,10 @@ The `hive_partitioning_options` block supports:
partitioning on an unsupported format will lead to an error.
Currently supported formats are: JSON, CSV, ORC, Avro and Parquet.
* CUSTOM: when set to `CUSTOM`, you must encode the partition key schema within the `source_uri_prefix` by setting `source_uri_prefix` to `gs://bucket/path_to_table/{key1:TYPE1}/{key2:TYPE2}/{key3:TYPE3}`.

* `require_partition_filter` - (Optional) If set to true, queries over this table
require a partition filter that can be used for partition elimination to be
specified.

* `source_uri_prefix` (Optional) - When hive partition detection is requested,
a common prefix for all source uris is required. The prefix must end immediately
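
For context, a minimal configuration sketch showing where the new attribute sits. This is not taken from the commit itself: the dataset, table, and bucket names are placeholders, and the surrounding external_data_configuration arguments follow the provider's existing schema as assumed here.

resource "google_bigquery_table" "hive_partitioned_example" {
  dataset_id = google_bigquery_dataset.example.dataset_id  # placeholder dataset
  table_id   = "hive_partitioned_example"

  external_data_configuration {
    source_format = "CSV"
    autodetect    = true
    source_uris   = ["gs://example-bucket/data/*"]  # placeholder bucket

    hive_partitioning_options {
      mode                     = "AUTO"
      source_uri_prefix        = "gs://example-bucket/data/"
      require_partition_filter = true
    }
  }
}

With require_partition_filter set to true, queries against this table that do not filter on the detected partition key(s) are rejected by BigQuery.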
