Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Upstream resource_logging_project_sink exclusions #2569

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .changelog/4027.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
logging: Added support for exclusion options for `google_logging_project_sink`
```
46 changes: 46 additions & 0 deletions google-beta/resource_logging_project_sink_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,28 @@ func TestAccLoggingProjectSink_heredoc(t *testing.T) {
})
}

// TestAccLoggingProjectSink_loggingbucket checks that a project sink whose
// destination is the project's _Default logging bucket (configured with
// exclusion blocks) can be created and then imported cleanly.
func TestAccLoggingProjectSink_loggingbucket(t *testing.T) {
	t.Parallel()

	name := "tf-test-sink-" + randString(t, 10)

	steps := []resource.TestStep{
		{
			// Create the sink with two exclusions against a log bucket destination.
			Config: testAccLoggingProjectSink_loggingbucket(name, getTestProjectFromEnv()),
		},
		{
			// Re-import and verify the state round-trips without diffs.
			ResourceName:      "google_logging_project_sink.loggingbucket",
			ImportState:       true,
			ImportStateVerify: true,
		},
	}

	vcrTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckLoggingProjectSinkDestroyProducer(t),
		Steps:        steps,
	})
}

func testAccCheckLoggingProjectSinkDestroyProducer(t *testing.T) func(s *terraform.State) error {
return func(s *terraform.State) error {
config := googleProviderConfig(t)
Expand Down Expand Up @@ -248,3 +270,27 @@ resource "google_bigquery_dataset" "logging_sink" {
}
`, sinkName, getTestProjectFromEnv(), getTestProjectFromEnv(), bqDatasetID)
}

// testAccLoggingProjectSink_loggingbucket renders a Terraform config for a
// project sink named `name` in `project`, exporting to the project's
// _Default logging bucket with two exclusion blocks and a unique writer
// identity.
func testAccLoggingProjectSink_loggingbucket(name, project string) string {
	const tmpl = `
resource "google_logging_project_sink" "loggingbucket" {
name = "%s"
project = "%s"
destination = "logging.googleapis.com/projects/%s/locations/global/buckets/_Default"
exclusions {
name = "ex1"
description = "test"
filter = "resource.type = k8s_container"
}

exclusions {
name = "ex2"
description = "test-2"
filter = "resource.type = k8s_container"
}

unique_writer_identity = true
}

`
	// The project appears twice: once as the sink's project and once inside
	// the bucket destination path.
	return fmt.Sprintf(tmpl, name, project, project)
}
79 changes: 79 additions & 0 deletions google-beta/resource_logging_sink.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,37 @@ func resourceLoggingSinkSchema() map[string]*schema.Schema {
Description: `The filter to apply when exporting logs. Only log entries that match the filter are exported.`,
},

"exclusions": {
Type: schema.TypeList,
Optional: true,
Description: `Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported.`,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
Description: `A client-assigned identifier, such as "load-balancer-exclusion". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.`,
},
"description": {
Type: schema.TypeString,
Optional: true,
Description: `A description of this exclusion.`,
},
"filter": {
Type: schema.TypeString,
Required: true,
Description: `An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries`,
},
"disabled": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: `If set to True, then this exclusion is disabled and it does not exclude any log entries`,
},
},
},
},

"writer_identity": {
Type: schema.TypeString,
Computed: true,
Expand Down Expand Up @@ -66,6 +97,7 @@ func expandResourceLoggingSink(d *schema.ResourceData, resourceType, resourceId
Name: d.Get("name").(string),
Destination: d.Get("destination").(string),
Filter: d.Get("filter").(string),
Exclusions: expandLoggingSinkExclusions(d.Get("exclusions")),
BigqueryOptions: expandLoggingSinkBigqueryOptions(d.Get("bigquery_options")),
}
return id, &sink
Expand All @@ -84,6 +116,9 @@ func flattenResourceLoggingSink(d *schema.ResourceData, sink *logging.LogSink) e
if err := d.Set("writer_identity", sink.WriterIdentity); err != nil {
return fmt.Errorf("Error setting writer_identity: %s", err)
}
if err := d.Set("exclusions", flattenLoggingSinkExclusion(sink.Exclusions)); err != nil {
return fmt.Errorf("Error setting exclusions: %s", err)
}
if err := d.Set("bigquery_options", flattenLoggingSinkBigqueryOptions(sink.BigqueryOptions)); err != nil {
return fmt.Errorf("Error setting bigquery_options: %s", err)
}
Expand All @@ -107,6 +142,10 @@ func expandResourceLoggingSinkForUpdate(d *schema.ResourceData) (sink *logging.L
if d.HasChange("filter") {
updateFields = append(updateFields, "filter")
}
if d.HasChange("exclusions") {
sink.Exclusions = expandLoggingSinkExclusions(d.Get("exclusions"))
updateFields = append(updateFields, "exclusions")
}
if d.HasChange("bigquery_options") {
sink.BigqueryOptions = expandLoggingSinkBigqueryOptions(d.Get("bigquery_options"))
updateFields = append(updateFields, "bigqueryOptions")
Expand Down Expand Up @@ -141,6 +180,46 @@ func flattenLoggingSinkBigqueryOptions(o *logging.BigQueryOptions) []map[string]
return []map[string]interface{}{oMap}
}

// expandLoggingSinkExclusions converts the raw "exclusions" list from the
// resource data into the API's LogExclusion representation. It returns nil
// when the attribute is absent or the list is empty, so an unset attribute
// never sends an empty exclusions field to the API.
func expandLoggingSinkExclusions(v interface{}) []*logging.LogExclusion {
	if v == nil {
		return nil
	}
	raw := v.([]interface{})
	if len(raw) == 0 {
		return nil
	}
	converted := make([]*logging.LogExclusion, len(raw))
	for i, item := range raw {
		// Each element is a map produced by the schema; the type assertions
		// are safe because the schema enforces these field types.
		m := item.(map[string]interface{})
		converted[i] = &logging.LogExclusion{
			Name:        m["name"].(string),
			Description: m["description"].(string),
			Filter:      m["filter"].(string),
			Disabled:    m["disabled"].(bool),
		}
	}
	return converted
}

// flattenLoggingSinkExclusion converts API LogExclusion values back into the
// map form stored in Terraform state. A nil input flattens to nil so the
// attribute stays unset when the API reports no exclusions.
func flattenLoggingSinkExclusion(exclusions []*logging.LogExclusion) []map[string]interface{} {
	if exclusions == nil {
		return nil
	}
	flattened := make([]map[string]interface{}, len(exclusions))
	for i, e := range exclusions {
		flattened[i] = map[string]interface{}{
			"name":        e.Name,
			"description": e.Description,
			"filter":      e.Filter,
			"disabled":    e.Disabled,
		}
	}
	return flattened
}

func resourceLoggingSinkImportState(sinkType string) schema.StateFunc {
return func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
loggingSinkId, err := parseLoggingSinkId(d.Id())
Expand Down
34 changes: 34 additions & 0 deletions website/docs/r/logging_project_sink.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,30 @@ resource "google_project_iam_binding" "log-writer" {
}
```

The following example uses `exclusions` to filter logs that will not be exported. In this example, logs are exported to a [log bucket](https://cloud.google.com/logging/docs/buckets) and two exclusions are configured.

```hcl
resource "google_logging_project_sink" "log-bucket" {
name = "my-logging-sink"
destination = "logging.googleapis.com/projects/my-project/locations/global/buckets/_Default"

exclusions {
name        = "nsexclusion1"
description = "Exclude logs from namespace-1 in k8s"
filter = "resource.type = k8s_container resource.labels.namespace_name=\"namespace-1\" "
}

exclusions {
name        = "nsexclusion2"
description = "Exclude logs from namespace-2 in k8s"
filter = "resource.type = k8s_container resource.labels.namespace_name=\"namespace-2\" "
}

unique_writer_identity = true
}
```



## Argument Reference

The following arguments are supported:
Expand Down Expand Up @@ -115,13 +139,23 @@ The following arguments are supported:

* `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure documented below.

* `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is documented below.

The `bigquery_options` block supports:

* `use_partitioned_tables` - (Required) Whether to use [BigQuery's partition tables](https://cloud.google.com/bigquery/docs/partitioned-tables).
By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned
tables the date suffix is no longer present and [special query syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables)
has to be used instead. In both cases, tables are sharded based on UTC timezone.

The `exclusions` block supports:

* `name` - (Required) A client-assigned identifier, such as `load-balancer-exclusion`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.
* `description` - (Optional) A description of this exclusion.
* `filter` - (Required) An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
write a filter.
* `disabled` - (Optional) If set to True, then this exclusion is disabled and it does not exclude any log entries.

## Attributes Reference

In addition to the arguments listed above, the following computed attributes are
Expand Down