From f3da9acaeba49e0c9ee3260b70aea3189887f543 Mon Sep 17 00:00:00 2001 From: Marco Reni Date: Thu, 2 Jan 2020 09:00:28 +0100 Subject: [PATCH 1/4] GH-10553 aws_msk_cluster: support cluster expansion --- aws/resource_aws_msk_cluster.go | 25 +++++++++++++++- aws/resource_aws_msk_cluster_test.go | 44 ++++++++++++++++++++++++---- 2 files changed, 62 insertions(+), 7 deletions(-) diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index da9a453f7f1..c5688b911dd 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -205,7 +205,6 @@ func resourceAwsMskCluster() *schema.Resource { "number_of_broker_nodes": { Type: schema.TypeInt, Required: true, - ForceNew: true, }, "tags": tagsSchema(), "zookeeper_connect_string": { @@ -380,6 +379,30 @@ func resourceAwsMskClusterUpdate(d *schema.ResourceData, meta interface{}) error } } + if d.HasChange("number_of_broker_nodes") { + input := &kafka.UpdateBrokerCountInput{ + ClusterArn: aws.String(d.Id()), + CurrentVersion: aws.String(d.Get("current_version").(string)), + TargetNumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), + } + + output, err := conn.UpdateBrokerCount(input) + + if err != nil { + return fmt.Errorf("error updating MSK Cluster (%s) broker count: %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error updating MSK Cluster (%s) broker count: empty response", d.Id()) + } + + clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + + if err := waitForMskClusterOperation(conn, clusterOperationARN); err != nil { + return fmt.Errorf("error waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) + } + } + if d.HasChange("configuration_info") { input := &kafka.UpdateClusterConfigurationInput{ ClusterArn: aws.String(d.Id()), diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index 541795c020b..67397e027fd 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -356,7 +356,7 @@ func TestAccAWSMskCluster_EnhancedMonitoring(t *testing.T) { } func TestAccAWSMskCluster_NumberOfBrokerNodes(t *testing.T) { - var cluster kafka.ClusterInfo + var cluster1, cluster2 kafka.ClusterInfo rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_msk_cluster.test" @@ -366,9 +366,33 @@ func TestAccAWSMskCluster_NumberOfBrokerNodes(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfigNumberOfBrokerNodes(rName), + Config: testAccMskClusterConfigNumberOfBrokerNodes(rName, 3), Check: resource.ComposeTestCheckFunc( - testAccCheckMskClusterExists(resourceName, &cluster), + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_tls", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.client_subnets.#", "3"), + resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.0", "aws_subnet.example_subnet_az1", "id"), + resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.1", "aws_subnet.example_subnet_az2", "id"), + 
resource.TestCheckResourceAttrPair(resourceName, "broker_node_group_info.0.client_subnets.2", "aws_subnet.example_subnet_az3", "id"), + resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "3"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + { + Config: testAccMskClusterConfigNumberOfBrokerNodes(rName, 6), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster2), + testAccCheckMskClusterNotRecreated(&cluster1, &cluster2), resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_tls", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), @@ -823,12 +847,18 @@ resource "aws_msk_cluster" "test" { } -func testAccMskClusterConfigNumberOfBrokerNodes(rName string) string { +func testAccMskClusterConfigNumberOfBrokerNodes(rName string, brokerCount int) string { return testAccMskClusterBaseConfig() + fmt.Sprintf(` resource "aws_msk_cluster" "test" { cluster_name = %[1]q kafka_version = "2.2.1" - number_of_broker_nodes = 6 + number_of_broker_nodes = %[2]d + + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] @@ -837,7 +867,9 @@ resource "aws_msk_cluster" "test" { security_groups = ["${aws_security_group.example_sg.id}"] } } -`, rName) +`, rName, brokerCount) + +} } From b803ff9a6885aeaf96024abdaa6265d732d0d6b4 Mon Sep 17 00:00:00 2001 From: Marco Reni Date: Thu, 2 Jan 2020 09:04:12 +0100 Subject: [PATCH 2/4] GH-11215 aws_msk_cluster: support open monitoring --- aws/resource_aws_msk_cluster.go | 185 ++++++++++++++++++++++- aws/resource_aws_msk_cluster_test.go | 89 ++++++++++- website/docs/r/msk_cluster.html.markdown | 29 ++++ 3 files changed, 300 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index c5688b911dd..7df42086071 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -189,7 +189,6 @@ func resourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Optional: true, Default: kafka.EnhancedMonitoringDefault, - ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ kafka.EnhancedMonitoringDefault, kafka.EnhancedMonitoringPerBroker, @@ -206,6 +205,54 @@ func resourceAwsMskCluster() *schema.Resource { Type: schema.TypeInt, Required: true, }, + "open_monitoring": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prometheus": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jmx_exporter": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "enabled_in_broker": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "node_exporter": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled_in_broker": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "tags": tagsSchema(), "zookeeper_connect_string": { Type: schema.TypeString, @@ -227,6 +274,7 @@ func resourceAwsMskClusterCreate(d *schema.ResourceData, meta interface{}) error EnhancedMonitoring: aws.String(d.Get("enhanced_monitoring").(string)), KafkaVersion: aws.String(d.Get("kafka_version").(string)), NumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), + OpenMonitoring: expandMskOpenMonitoring(d.Get("open_monitoring").([]interface{})), Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().KafkaTags(), } @@ -342,6 +390,10 @@ func resourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting tags: %s", err) } + if err := d.Set("open_monitoring", flattenMskOpenMonitoring(cluster.OpenMonitoring)); err != nil { + return fmt.Errorf("error setting open_monitoring: %s", err) + } + d.Set("zookeeper_connect_string", aws.StringValue(cluster.ZookeeperConnectString)) return nil @@ -403,6 +455,31 @@ func resourceAwsMskClusterUpdate(d *schema.ResourceData, meta interface{}) error } } + if d.HasChange("enhanced_monitoring") || d.HasChange("open_monitoring") { + input := &kafka.UpdateMonitoringInput{ + ClusterArn: aws.String(d.Id()), + CurrentVersion: aws.String(d.Get("current_version").(string)), + EnhancedMonitoring: aws.String(d.Get("enhanced_monitoring").(string)), + OpenMonitoring: expandMskOpenMonitoring(d.Get("open_monitoring").([]interface{})), + } + + output, err := conn.UpdateMonitoring(input) + + if err != nil { + return fmt.Errorf("error updating MSK Cluster (%s) monitoring: %s", d.Id(), err) + } + + if output == nil { + return fmt.Errorf("error updating MSK Cluster (%s) monitoring: empty response", d.Id()) + } + + clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + + if err := waitForMskClusterOperation(conn, clusterOperationARN); err != nil { + return fmt.Errorf("error waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) + } + } + if d.HasChange("configuration_info") { input := &kafka.UpdateClusterConfigurationInput{ ClusterArn: aws.String(d.Id()), @@ -539,6 +616,63 @@ func expandMskClusterTls(l []interface{}) *kafka.Tls { return tls } +func expandMskOpenMonitoring(l []interface{}) *kafka.OpenMonitoringInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + openMonitoring := &kafka.OpenMonitoringInfo{ + Prometheus: expandMskOpenMonitoringPrometheus(m["prometheus"].([]interface{})), + } + + return openMonitoring +} + +func expandMskOpenMonitoringPrometheus(l []interface{}) *kafka.PrometheusInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + prometheus := &kafka.PrometheusInfo{ + JmxExporter: expandMskOpenMonitoringPrometheusJmxExporter(m["jmx_exporter"].([]interface{})), + NodeExporter: expandMskOpenMonitoringPrometheusNodeExporter(m["node_exporter"].([]interface{})), + } + + return prometheus +} + +func expandMskOpenMonitoringPrometheusJmxExporter(l []interface{}) *kafka.JmxExporterInfo { + if len(l) == 0 || l[0] == nil { + 
return nil + } + + m := l[0].(map[string]interface{}) + + jmxExporter := &kafka.JmxExporterInfo{ + EnabledInBroker: aws.Bool(m["enabled_in_broker"].(bool)), + } + + return jmxExporter +} + +func expandMskOpenMonitoringPrometheusNodeExporter(l []interface{}) *kafka.NodeExporterInfo { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + nodeExporter := &kafka.NodeExporterInfo{ + EnabledInBroker: aws.Bool(m["enabled_in_broker"].(bool)), + } + + return nodeExporter +} + func flattenMskBrokerNodeGroupInfo(b *kafka.BrokerNodeGroupInfo) []map[string]interface{} { if b == nil { @@ -622,6 +756,55 @@ func flattenMskTls(tls *kafka.Tls) []map[string]interface{} { return []map[string]interface{}{m} } +func flattenMskOpenMonitoring(e *kafka.OpenMonitoring) []map[string]interface{} { + if e == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "prometheus": flattenMskOpenMonitoringPrometheus(e.Prometheus), + } + + return []map[string]interface{}{m} +} + +func flattenMskOpenMonitoringPrometheus(e *kafka.Prometheus) []map[string]interface{} { + if e == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "jmx_exporter": flattenMskOpenMonitoringPrometheusJmxExporter(e.JmxExporter), + "node_exporter": flattenMskOpenMonitoringPrometheusNodeExporter(e.NodeExporter), + } + + return []map[string]interface{}{m} +} + +func flattenMskOpenMonitoringPrometheusJmxExporter(e *kafka.JmxExporter) []map[string]interface{} { + if e == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "enabled_in_broker": aws.BoolValue(e.EnabledInBroker), + } + + return []map[string]interface{}{m} +} + +func flattenMskOpenMonitoringPrometheusNodeExporter(e *kafka.NodeExporter) []map[string]interface{} { + if e == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "enabled_in_broker": aws.BoolValue(e.EnabledInBroker), + } + + return []map[string]interface{}{m} +} + func resourceAwsMskClusterDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).kafkaconn diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index 67397e027fd..35c7439b5f4 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -326,7 +326,7 @@ func TestAccAWSMskCluster_EncryptionInfo_EncryptionInTransit_InCluster(t *testin } func TestAccAWSMskCluster_EnhancedMonitoring(t *testing.T) { - var cluster kafka.ClusterInfo + var cluster1, cluster2 kafka.ClusterInfo rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_msk_cluster.test" @@ -338,7 +338,7 @@ func TestAccAWSMskCluster_EnhancedMonitoring(t *testing.T) { { Config: testAccMskClusterConfigEnhancedMonitoring(rName, "PER_BROKER"), Check: resource.ComposeTestCheckFunc( - testAccCheckMskClusterExists(resourceName, &cluster), + testAccCheckMskClusterExists(resourceName, &cluster1), resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringPerBroker), ), }, @@ -351,6 +351,14 @@ func TestAccAWSMskCluster_EnhancedMonitoring(t *testing.T) { "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return }, }, + { + Config: testAccMskClusterConfigEnhancedMonitoring(rName, "PER_TOPIC_PER_BROKER"), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster2), + testAccCheckMskClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, 
"enhanced_monitoring", kafka.EnhancedMonitoringPerTopicPerBroker), + ), + }, }, }) } @@ -403,6 +411,32 @@ func TestAccAWSMskCluster_NumberOfBrokerNodes(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "6"), ), }, + }, + }) +} + +func TestAccAWSMskCluster_OpenMonitoring(t *testing.T) { + var cluster1, cluster2 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskClusterConfigOpenMonitoring(rName, false, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.jmx_exporter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.jmx_exporter.0.enabled_in_broker", "false"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.node_exporter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.node_exporter.0.enabled_in_broker", "false"), + ), + }, { ResourceName: resourceName, ImportState: true, @@ -412,6 +446,19 @@ func TestAccAWSMskCluster_NumberOfBrokerNodes(t *testing.T) { "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return }, }, + { + Config: testAccMskClusterConfigOpenMonitoring(rName, true, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster2), + testAccCheckMskClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.jmx_exporter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.jmx_exporter.0.enabled_in_broker", "true"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.node_exporter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "open_monitoring.0.prometheus.0.node_exporter.0.enabled_in_broker", "false"), + ), + }, }, }) } @@ -836,6 +883,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = 10 @@ -871,6 +924,38 @@ resource "aws_msk_cluster" "test" { } +func testAccMskClusterConfigOpenMonitoring(rName string, jmxExporterEnabled bool, nodeExporterEnabled bool) string { + return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.2.1" + number_of_broker_nodes = 3 + + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + + broker_node_group_info { + client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] + 
ebs_volume_size = 10 + instance_type = "kafka.m5.large" + security_groups = ["${aws_security_group.example_sg.id}"] + } + + open_monitoring { + prometheus { + jmx_exporter { + enabled_in_broker = %[2]t + } + node_exporter { + enabled_in_broker = %[3]t + } + } + } +} +`, rName, jmxExporterEnabled, nodeExporterEnabled) } func testAccMskClusterConfigTags1(rName string) string { diff --git a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index 58b89e3d1e9..924433b6bad 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -68,6 +68,17 @@ resource "aws_msk_cluster" "example" { encryption_at_rest_kms_key_arn = "${aws_kms_key.kms.arn}" } + open_monitoring { + prometheus { + jmx_exporter { + enabled_in_broker = true + } + node_exporter { + enabled_in_broker = true + } + } + } + tags = { foo = "bar" } @@ -100,6 +111,7 @@ The following arguments are supported: * `configuration_info` - (Optional) Configuration block for specifying a MSK Configuration to attach to Kafka brokers. See below. * `encryption_info` - (Optional) Configuration block for specifying encryption. See below. * `enhanced_monitoring` - (Optional) Specify the desired enhanced MSK CloudWatch monitoring level. See [Monitoring Amazon MSK with Amazon CloudWatch](https://docs.aws.amazon.com/msk/latest/developerguide/monitoring.html) +* `open_monitoring` - (Optional) Configuration block for JMX and Node monitoring for the MSK cluster. See below. * `tags` - (Optional) A mapping of tags to assign to the resource ### broker_node_group_info Argument Reference @@ -133,6 +145,23 @@ The following arguments are supported: * `client_broker` - (Optional) Encryption setting for data in transit between clients and brokers. Valid values: `TLS`, `TLS_PLAINTEXT`, and `PLAINTEXT`. Default value: `TLS_PLAINTEXT`. * `in_cluster` - (Optional) Whether data communication among broker nodes is encrypted. Default value: `true`. +#### open_monitoring Argument Reference + +* `prometheus` - (Optional) Configuration block for Prometheus settings for open monitoring. See below. + +#### open_monitoring prometheus Argument Reference + +* `jmx_exporter` - (Optional) Configuration block for JMX Exporter. See below. +* `node_exporter` - (Optional) Configuration block for Node Exporter. See below. + +#### open_monitoring prometheus jmx_exporter Argument Reference + +* `enabled_in_broker` - (Required) Indicates whether you want to enable or disable the JMX Exporter. + +#### open_monitoring prometheus node_exporter Argument Reference + +* `enabled_in_broker` - (Required) Indicates whether you want to enable or disable the Node Exporter. 
+ ## Attributes Reference In addition to all arguments above, the following attributes are exported: From 7f893ca144330b868689bf9cacac1816dba2141c Mon Sep 17 00:00:00 2001 From: Marco Reni Date: Thu, 2 Jan 2020 09:05:30 +0100 Subject: [PATCH 3/4] aws_msk_cluster: fix tests + align docs --- aws/resource_aws_msk_cluster_test.go | 42 +++++++++++++++++++++++- website/docs/d/msk_cluster.html.markdown | 2 +- website/docs/r/msk_cluster.html.markdown | 2 +- 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index 35c7439b5f4..5cfacf4a664 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -95,7 +95,7 @@ func TestAccAWSMskCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kafka_version", "2.2.1"), resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "3"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestMatchResourceAttr(resourceName, "zookeeper_connect_string", regexp.MustCompile(`^\d+\.\d+\.\d+\.\d+:\d+,\d+\.\d+\.\d+\.\d+:\d+,\d+\.\d+\.\d+\.\d+:\d+$`)), + resource.TestMatchResourceAttr(resourceName, "zookeeper_connect_string", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), ), }, { @@ -670,6 +670,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = 10 @@ -687,6 +693,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = %[2]d @@ -753,6 +765,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = 10 @@ -784,6 +802,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = 10 @@ -823,6 +847,9 @@ resource "aws_msk_cluster" "test" { encryption_info { encryption_at_rest_kms_key_arn = "${aws_kms_key.example_key.arn}" + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } } } `, rName) @@ -868,6 +895,7 @@ resource "aws_msk_cluster" "test" { encryption_info { encryption_in_transit { + client_broker = "TLS_PLAINTEXT" in_cluster = %[2]t } } @@ -965,6 +993,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", 
"${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = 10 @@ -986,6 +1020,12 @@ resource "aws_msk_cluster" "test" { kafka_version = "2.2.1" number_of_broker_nodes = 3 + encryption_info { + encryption_in_transit { + client_broker = "TLS_PLAINTEXT" + } + } + broker_node_group_info { client_subnets = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"] ebs_volume_size = 10 diff --git a/website/docs/d/msk_cluster.html.markdown b/website/docs/d/msk_cluster.html.markdown index fd83b9959fa..4ab8eb5733c 100644 --- a/website/docs/d/msk_cluster.html.markdown +++ b/website/docs/d/msk_cluster.html.markdown @@ -34,4 +34,4 @@ In addition to all arguments above, the following attributes are exported: * `kafka_version` - Apache Kafka version. * `number_of_broker_nodes` - Number of broker nodes in the cluster. * `tags` - Map of key-value pairs assigned to the cluster. -* `zookeeper_connect_string` - A comma separated list of one or more IP:port pairs to use to connect to the Apache Zookeeper cluster. +* `zookeeper_connect_string` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster. diff --git a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index 924433b6bad..942e158496b 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -171,7 +171,7 @@ In addition to all arguments above, the following attributes are exported: * `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. Only contains value if `client_broker` encryption in transit is set to `TLS_PLAINTEXT` or `TLS`. * `current_version` - Current version of the MSK Cluster used for updates, e.g. `K13V1IB3VIYZZH` * `encryption_info.0.encryption_at_rest_kms_key_arn` - The ARN of the KMS key used for encryption at rest of the broker data volumes. -* `zookeeper_connect_string` - A comma separated list of one or more IP:port pairs to use to connect to the Apache Zookeeper cluster. +* `zookeeper_connect_string` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster. 
## Import From 42a11a6a74355d5079235e817d0603c0e7f42909 Mon Sep 17 00:00:00 2001 From: Marco Reni Date: Wed, 29 Jan 2020 14:40:18 +0100 Subject: [PATCH 4/4] Fix field requirement --- aws/resource_aws_msk_cluster.go | 7 +++---- website/docs/r/msk_cluster.html.markdown | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index 7df42086071..74ffbf6b31f 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -213,10 +213,9 @@ func resourceAwsMskCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "prometheus": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - MaxItems: 1, + Type: schema.TypeList, + Required: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "jmx_exporter": { diff --git a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index 942e158496b..ab104cd76f4 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -147,7 +147,7 @@ The following arguments are supported: #### open_monitoring Argument Reference -* `prometheus` - (Optional) Configuration block for Prometheus settings for open monitoring. See below. +* `prometheus` - (Required) Configuration block for Prometheus settings for open monitoring. See below. #### open_monitoring prometheus Argument Reference
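A minimal configuration sketch that ties this series together, combining in-place broker expansion with the new `open_monitoring` block. The subnet and security group references are placeholders assumed to be defined elsewhere, and the values mirror the acceptance test configs above rather than recommended production settings:

```hcl
resource "aws_msk_cluster" "example" {
  cluster_name  = "example"
  kafka_version = "2.2.1"

  # Can now be increased in place (e.g. 3 -> 6); MSK expects this to stay a
  # multiple of the number of client subnets.
  number_of_broker_nodes = 3

  broker_node_group_info {
    # Placeholder network resources assumed to exist in the configuration.
    client_subnets  = ["${aws_subnet.example_subnet_az1.id}", "${aws_subnet.example_subnet_az2.id}", "${aws_subnet.example_subnet_az3.id}"]
    ebs_volume_size = 10
    instance_type   = "kafka.m5.large"
    security_groups = ["${aws_security_group.example_sg.id}"]
  }

  # New in this series: toggling these flags (or enhanced_monitoring) triggers
  # UpdateMonitoring instead of cluster replacement.
  open_monitoring {
    prometheus {
      jmx_exporter {
        enabled_in_broker = true
      }

      node_exporter {
        enabled_in_broker = true
      }
    }
  }
}
```

With `ForceNew` dropped from `number_of_broker_nodes` and `enhanced_monitoring`, changing the broker count or the monitoring settings on an existing cluster is applied through `UpdateBrokerCount` / `UpdateMonitoring` and waited on with `waitForMskClusterOperation`, rather than forcing a new cluster.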