
Commit

Add GKE Resource Consumption Metering, promote resource export to GA (#3303) (#5990)

* Add GKE Resource Consumption Metering, promote resource export to GA

* Add docs

* Spacing

Signed-off-by: Modular Magician <[email protected]>
modular-magician authored Mar 26, 2020
1 parent 2d19c9b commit a646d86
Showing 4 changed files with 207 additions and 0 deletions.
6 changes: 6 additions & 0 deletions .changelog/3303.txt
@@ -0,0 +1,6 @@
```release-note:enhancement
container: added `resource_usage_export_config` to `google_container_cluster`, previously only available in `google-beta` (ga only)
```
```release-note:enhancement
container: added `enable_resource_consumption_metering` to `resource_usage_export_config` in `google_container_cluster` (beta only)
```
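
For reference, a minimal `google_container_cluster` configuration exercising both changelog entries might look like the sketch below; the resource names, dataset ID, and location are illustrative rather than part of this change, and `enable_resource_consumption_metering` still requires the `google-beta` provider at this point.

```hcl
# Illustrative sketch only; names and location are placeholders, not part of this commit.
resource "google_bigquery_dataset" "usage_export" {
  dataset_id = "cluster_resource_usage"
}

resource "google_container_cluster" "example" {
  name               = "example-cluster"
  location           = "us-central1-a"
  initial_node_count = 1

  resource_usage_export_config {
    enable_network_egress_metering       = false
    enable_resource_consumption_metering = true

    bigquery_destination {
      dataset_id = google_bigquery_dataset.usage_export.dataset_id
    }
  }
}
```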
115 changes: 115 additions & 0 deletions google/resource_container_cluster.go
@@ -747,6 +747,39 @@ func resourceContainerCluster() *schema.Resource {
},
},

"resource_usage_export_config": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"enable_network_egress_metering": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"enable_resource_consumption_metering": {
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"bigquery_destination": {
Type: schema.TypeList,
MaxItems: 1,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"dataset_id": {
Type: schema.TypeString,
Required: true,
},
},
},
},
},
},
},

"enable_intranode_visibility": {
Type: schema.TypeBool,
Optional: true,
@@ -937,6 +970,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v)
}

if v, ok := d.GetOk("resource_usage_export_config"); ok {
cluster.ResourceUsageExportConfig = expandResourceUsageExportConfig(v)
}

req := &containerBeta.CreateClusterRequest{
Cluster: cluster,
}
@@ -1137,6 +1174,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
d.Set("resource_labels", cluster.ResourceLabels)
d.Set("label_fingerprint", cluster.LabelFingerprint)

if err := d.Set("resource_usage_export_config", flattenResourceUsageExportConfig(cluster.ResourceUsageExportConfig)); err != nil {
return err
}

return nil
}

@@ -1631,6 +1672,31 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
}
}

if d.HasChange("resource_usage_export_config") {
  c := d.Get("resource_usage_export_config")
  req := &containerBeta.UpdateClusterRequest{
    Update: &containerBeta.ClusterUpdate{
      DesiredResourceUsageExportConfig: expandResourceUsageExportConfig(c),
    },
  }

  updateF := func() error {
    name := containerClusterFullName(project, location, clusterName)
    op, err := config.clientContainerBeta.Projects.Locations.Clusters.Update(name, req).Do()
    if err != nil {
      return err
    }
    // Wait until it's updated
    return containerOperationWait(config, op, project, location, "updating GKE cluster resource usage export config", timeoutInMinutes)
  }
  if err := lockedCall(lockKey, updateF); err != nil {
    return err
  }
  log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id())

  d.SetPartial("resource_usage_export_config")
}

d.Partial(false)

if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, d.Timeout(schema.TimeoutUpdate)); err != nil {
@@ -2088,6 +2154,34 @@ func expandDefaultMaxPodsConstraint(v interface{}) *containerBeta.MaxPodsConstra
MaxPodsPerNode: int64(v.(int)),
}
}
func expandResourceUsageExportConfig(configured interface{}) *containerBeta.ResourceUsageExportConfig {
  l := configured.([]interface{})
  if len(l) == 0 || l[0] == nil {
    return &containerBeta.ResourceUsageExportConfig{}
  }

  resourceUsageConfig := l[0].(map[string]interface{})

  result := &containerBeta.ResourceUsageExportConfig{
    EnableNetworkEgressMetering: resourceUsageConfig["enable_network_egress_metering"].(bool),
    ConsumptionMeteringConfig: &containerBeta.ConsumptionMeteringConfig{
      Enabled:         resourceUsageConfig["enable_resource_consumption_metering"].(bool),
      ForceSendFields: []string{"Enabled"},
    },
    // ForceSendFields ensures explicitly set false values are still serialized
    // in the API request rather than being dropped as empty fields.
    ForceSendFields: []string{"EnableNetworkEgressMetering"},
  }
  if _, ok := resourceUsageConfig["bigquery_destination"]; ok {
    if len(resourceUsageConfig["bigquery_destination"].([]interface{})) > 0 {
      bigqueryDestination := resourceUsageConfig["bigquery_destination"].([]interface{})[0].(map[string]interface{})
      if _, ok := bigqueryDestination["dataset_id"]; ok {
        result.BigqueryDestination = &containerBeta.BigQueryDestination{
          DatasetId: bigqueryDestination["dataset_id"].(string),
        }
      }
    }
  }
  return result
}

func flattenNetworkPolicy(c *containerBeta.NetworkPolicy) []map[string]interface{} {
result := []map[string]interface{}{}
@@ -2317,6 +2411,27 @@ func flattenMasterAuthorizedNetworksConfig(c *containerBeta.MasterAuthorizedNetw
return []map[string]interface{}{result}
}

func flattenResourceUsageExportConfig(c *containerBeta.ResourceUsageExportConfig) []map[string]interface{} {
  if c == nil {
    return nil
  }

  enableResourceConsumptionMetering := false
  if c.ConsumptionMeteringConfig != nil && c.ConsumptionMeteringConfig.Enabled {
    enableResourceConsumptionMetering = true
  }

  return []map[string]interface{}{
    {
      "enable_network_egress_metering":        c.EnableNetworkEgressMetering,
      "enable_resource_consumption_metering": enableResourceConsumptionMetering,
      // bigquery_destination is a required block in the schema, so the API is
      // expected to return it whenever the export config itself is set.
      "bigquery_destination": []map[string]interface{}{
        {"dataset_id": c.BigqueryDestination.DatasetId},
      },
    },
  }
}

func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*Config)

79 changes: 79 additions & 0 deletions google/resource_container_cluster_test.go
@@ -1202,6 +1202,46 @@ func TestAccContainerCluster_errorNoClusterCreated(t *testing.T) {
}, testAccCheckContainerClusterDestroyProducer)
}

func TestAccContainerCluster_withResourceUsageExportConfig(t *testing.T) {
  t.Parallel()

  suffix := randString(t, 10)
  clusterName := fmt.Sprintf("tf-test-cluster-%s", suffix)
  datasetId := fmt.Sprintf("tf_test_cluster_resource_usage_%s", suffix)

  vcrTest(t, resource.TestCase{
    PreCheck:     func() { testAccPreCheck(t) },
    Providers:    testAccProviders,
    CheckDestroy: testAccCheckContainerClusterDestroy,
    Steps: []resource.TestStep{
      {
        Config: testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, "true"),
      },
      {
        ResourceName:      "google_container_cluster.with_resource_usage_export_config",
        ImportState:       true,
        ImportStateVerify: true,
      },
      {
        Config: testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, "false"),
      },
      {
        ResourceName:      "google_container_cluster.with_resource_usage_export_config",
        ImportState:       true,
        ImportStateVerify: true,
      },
      {
        Config: testAccContainerCluster_withResourceUsageExportConfigNoConfig(clusterName, datasetId),
      },
      {
        ResourceName:      "google_container_cluster.with_resource_usage_export_config",
        ImportState:       true,
        ImportStateVerify: true,
      },
    },
  }, testAccCheckContainerClusterDestroyProducer)
}

func TestAccContainerCluster_withMasterAuthorizedNetworksDisabled(t *testing.T) {
t.Parallel()

@@ -2503,6 +2543,45 @@ resource "google_container_cluster" "with_ip_allocation_policy" {
`, containerNetName, clusterName)
}

func testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, enableMetering string) string {
  return fmt.Sprintf(`
resource "google_bigquery_dataset" "default" {
  dataset_id                 = "%s"
  description                = "gke resource usage dataset tests"
  delete_contents_on_destroy = true
}

resource "google_container_cluster" "with_resource_usage_export_config" {
  name               = "%s"
  location           = "us-central1-a"
  initial_node_count = 1

  resource_usage_export_config {
    enable_network_egress_metering       = true
    enable_resource_consumption_metering = %s

    bigquery_destination {
      dataset_id = google_bigquery_dataset.default.dataset_id
    }
  }
}
`, datasetId, clusterName, enableMetering)
}

func testAccContainerCluster_withResourceUsageExportConfigNoConfig(clusterName, datasetId string) string {
  return fmt.Sprintf(`
resource "google_bigquery_dataset" "default" {
  dataset_id                 = "%s"
  description                = "gke resource usage dataset tests"
  delete_contents_on_destroy = true
}

resource "google_container_cluster" "with_resource_usage_export_config" {
  name               = "%s"
  location           = "us-central1-a"
  initial_node_count = 1
}
`, datasetId, clusterName)
}

func testAccContainerCluster_withPrivateClusterConfigMissingCidrBlock(containerNetName string, clusterName string) string {
return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
7 changes: 7 additions & 0 deletions website/docs/r/container_cluster.html.markdown
@@ -664,13 +664,20 @@ The `resource_usage_export_config` block supports:
* `enable_network_egress_metering` (Optional) - Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created
in the cluster to meter network egress traffic.

* `enable_resource_consumption_metering` (Optional) - Whether to enable resource
consumption metering on this cluster. When enabled, a table will be created in
the resource export BigQuery dataset to store resource consumption data. The
resulting table can be joined with the resource usage table or with BigQuery
billing export. Defaults to `true`.

* `bigquery_destination` (Required) - Parameters for using BigQuery as the destination of resource usage export.

* `bigquery_destination.dataset_id` (Required) - The ID of a BigQuery Dataset. For example:

```hcl
resource_usage_export_config {
  enable_network_egress_metering       = false
  enable_resource_consumption_metering = true

  bigquery_destination {
    dataset_id = "cluster_resource_usage"
  }
}
```
