From 288f1b75794139a427d38a913745abc081663cd2 Mon Sep 17 00:00:00 2001 From: David Blooman Date: Wed, 23 Sep 2020 14:19:34 +0100 Subject: [PATCH 1/6] r/aws_msk_sasl_scram_secret: New Resource --- aws/data_source_aws_msk_cluster.go | 5 + aws/provider.go | 1 + aws/resource_aws_msk_cluster.go | 51 ++- aws/resource_aws_msk_cluster_test.go | 79 ++++ aws/resource_aws_msk_scram_secret.go | 216 +++++++++++ aws/resource_aws_msk_scram_secret_test.go | 361 ++++++++++++++++++ website/docs/d/msk_cluster.html.markdown | 1 + website/docs/r/msk_cluster.html.markdown | 10 +- website/docs/r/msk_sasl_scram_secret.markdown | 90 +++++ 9 files changed, 810 insertions(+), 4 deletions(-) create mode 100644 aws/resource_aws_msk_scram_secret.go create mode 100644 aws/resource_aws_msk_scram_secret_test.go create mode 100644 website/docs/r/msk_sasl_scram_secret.markdown diff --git a/aws/data_source_aws_msk_cluster.go b/aws/data_source_aws_msk_cluster.go index 7797f0a39f9..e2c798e4152 100644 --- a/aws/data_source_aws_msk_cluster.go +++ b/aws/data_source_aws_msk_cluster.go @@ -27,6 +27,10 @@ func dataSourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bootstrap_brokers_sasl_scram": { + Type: schema.TypeString, + Computed: true, + }, "cluster_name": { Type: schema.TypeString, Required: true, @@ -101,6 +105,7 @@ func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("arn", aws.StringValue(cluster.ClusterArn)) d.Set("bootstrap_brokers", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerString)) d.Set("bootstrap_brokers_tls", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerStringTls)) + d.Set("bootstrap_brokers_sasl_scram", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerStringSaslScram)) d.Set("cluster_name", aws.StringValue(cluster.ClusterName)) d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) diff
--git a/aws/provider.go b/aws/provider.go index 976a61f49a0..e5e2df44f53 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -758,6 +758,7 @@ func Provider() *schema.Provider { "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), "aws_msk_cluster": resourceAwsMskCluster(), "aws_msk_configuration": resourceAwsMskConfiguration(), + "aws_msk_sasl_scram_secret": resourceAwsMskScramSecret(), "aws_nat_gateway": resourceAwsNatGateway(), "aws_network_acl": resourceAwsNetworkAcl(), "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index d6688b6a86b..467a63427de 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -35,6 +35,10 @@ func resourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bootstrap_brokers_sasl_scram": { + Type: schema.TypeString, + Computed: true, + }, "broker_node_group_info": { Type: schema.TypeList, Required: true, @@ -106,6 +110,18 @@ func resourceAwsMskCluster() *schema.Resource { }, }, }, + "sasl": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scram": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, }, }, }, @@ -439,6 +455,7 @@ func resourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", aws.StringValue(cluster.ClusterArn)) d.Set("bootstrap_brokers", aws.StringValue(brokerOut.BootstrapBrokerString)) d.Set("bootstrap_brokers_tls", aws.StringValue(brokerOut.BootstrapBrokerStringTls)) + d.Set("bootstrap_brokers_sasl_scram", aws.StringValue(brokerOut.BootstrapBrokerStringSaslScram)) if err := d.Set("broker_node_group_info", flattenMskBrokerNodeGroupInfo(cluster.BrokerNodeGroupInfo)); err != nil { return fmt.Errorf("error setting broker_node_group_info: %s", err) @@ -629,7 +646,8 @@ func expandMskClusterClientAuthentication(l []interface{}) *kafka.ClientAuthenti m := 
l[0].(map[string]interface{}) ca := &kafka.ClientAuthentication{ - Tls: expandMskClusterTls(m["tls"].([]interface{})), + Tls: expandMskClusterTls(m["tls"].([]interface{})), + Sasl: expandMskClusterScram(m["sasl"].([]interface{})), } return ca @@ -699,6 +717,22 @@ func expandMskClusterTls(l []interface{}) *kafka.Tls { return tls } +func expandMskClusterScram(l []interface{}) *kafka.Sasl { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + sasl := &kafka.Sasl{ + Scram: &kafka.Scram{ + Enabled: aws.Bool(m["scram"].(bool)), + }, + } + + return sasl +} + func expandMskOpenMonitoring(l []interface{}) *kafka.OpenMonitoringInfo { if len(l) == 0 || l[0] == nil { return nil @@ -858,7 +892,8 @@ func flattenMskClientAuthentication(ca *kafka.ClientAuthentication) []map[string } m := map[string]interface{}{ - "tls": flattenMskTls(ca.Tls), + "tls": flattenMskTls(ca.Tls), + "sasl": flattenMskSasl(ca.Sasl), } return []map[string]interface{}{m} @@ -915,6 +950,18 @@ func flattenMskTls(tls *kafka.Tls) []map[string]interface{} { return []map[string]interface{}{m} } +func flattenMskSasl(sasl *kafka.Sasl) []map[string]interface{} { + if sasl == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "scram": aws.Bool(true), + } + + return []map[string]interface{}{m} +} + func flattenMskOpenMonitoring(e *kafka.OpenMonitoring) []map[string]interface{} { if e == nil { return []map[string]interface{}{} diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index c4950d94c2a..1e8e35b3df8 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -185,6 +185,37 @@ func TestAccAWSMskCluster_ClientAuthentication_Tls_CertificateAuthorityArns(t *t }) } +func TestAccAWSMskCluster_ClientAuthentication_Sasl_Scram(t *testing.T) { + var cluster1 kafka.ClusterInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_cluster.test" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskClusterConfigClientAuthenticationSaslScram(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.0.scram", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + }, + }) +} + func TestAccAWSMskCluster_ConfigurationInfo_Revision(t *testing.T) { var cluster1, cluster2 kafka.ClusterInfo @@ -792,6 +823,54 @@ resource "aws_msk_cluster" "test" { `, rName) } +func testAccMskClusterConfigClientAuthenticationSaslScram(rName string) string { + return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_secretsmanager_secret" "test" { + name = "AmazonMSK_test" + kms_key_id = aws_kms_key.test.key_id +} + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_string = jsonencode({ username = "%s", password = "foobar" }) +} + +resource "aws_kms_key" "test" { + description = "test" +} + +resource "aws_msk_sasl_scram_secret" "test" { + cluster_arn = aws_msk_cluster.test.arn + secret_arn_list = [aws_secretsmanager_secret.test.arn] +} + +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.2.1" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = [aws_subnet.example_subnet_az1.id, aws_subnet.example_subnet_az2.id, aws_subnet.example_subnet_az3.id] + 
ebs_volume_size = 10 + instance_type = "kafka.m5.large" + security_groups = [aws_security_group.example_sg.id] + } + + client_authentication { + sasl { + scram = true + } + } + + encryption_info { + encryption_in_transit { + client_broker = "TLS" + } + } +} +`, rName) +} + func testAccMskClusterConfigConfigurationInfoRevision1(rName string) string { return testAccMskClusterBaseConfig() + fmt.Sprintf(` resource "aws_msk_configuration" "test" { diff --git a/aws/resource_aws_msk_scram_secret.go b/aws/resource_aws_msk_scram_secret.go new file mode 100644 index 00000000000..44e11440a22 --- /dev/null +++ b/aws/resource_aws_msk_scram_secret.go @@ -0,0 +1,216 @@ +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceAwsMskScramSecret() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsMskScramSecretCreate, + Read: resourceAwsMskScramSecretRead, + Update: resourceAwsMskScramSecretUpdate, + Delete: resourceAwsMskScramSecretDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "cluster_arn": { + Type: schema.TypeString, + Required: true, + }, + "secret_arn_list": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "scram_secrets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAwsMskScramSecretCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + existingSecrets, err := readSecrets(conn, d.Get("cluster_arn").(string)) + if err != nil { + return fmt.Errorf("failed lookup secrets %s", err) + } + + createSecrets := filterNewSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) + + out, err 
:= associateSecrets(conn, d.Get("cluster_arn").(string), createSecrets) + if err != nil { + return fmt.Errorf("error associating credentials with MSK cluster: %s", err) + } + d.SetId(d.Get("cluster_arn").(string)) + + if len(out.UnprocessedScramSecrets) != 0 { + return fmt.Errorf("there were unprocessed secrets during association: %s", out.UnprocessedScramSecrets) + } + + return resourceAwsMskScramSecretRead(d, meta) +} + +func resourceAwsMskScramSecretRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + scramSecrets, err := readSecrets(conn, d.Get("cluster_arn").(string)) + if err != nil { + return fmt.Errorf("failed lookup secrets %s", err) + } + + allSecrets := filterExistingSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), scramSecrets) + + d.SetId(d.Get("cluster_arn").(string)) + d.Set("arn", d.Get("cluster_arn").(string)) + d.Set("scram_secrets", aws.StringValueSlice(allSecrets)) + + return nil +} + +func resourceAwsMskScramSecretUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + existingSecrets := expandStringList(d.Get("scram_secrets").([]interface{})) + + updateSecrets, deleteSecrets := filterSecretsForDeletion(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) + + out, err := associateSecrets(conn, d.Get("cluster_arn").(string), updateSecrets) + if err != nil { + return fmt.Errorf("error associating credentials with MSK cluster: %s", err) + } + + if len(out.UnprocessedScramSecrets) != 0 { + return fmt.Errorf("there were unprocessed secrets during association: %s", out.UnprocessedScramSecrets) + } + + if len(deleteSecrets) > 0 { + deleteOutput, err := conn.BatchDisassociateScramSecret(&kafka.BatchDisassociateScramSecretInput{ + ClusterArn: aws.String(d.Get("cluster_arn").(string)), + SecretArnList: deleteSecrets, + }) + if err != nil { + return fmt.Errorf("error disassociating credentials with MSK cluster: %s", err) + } + 
+ if len(deleteOutput.UnprocessedScramSecrets) != 0 { + return fmt.Errorf("there were unprocessed secrets during association: %s", out.UnprocessedScramSecrets) + } + } + + return resourceAwsMskScramSecretRead(d, meta) +} + +func resourceAwsMskScramSecretDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + _, err := conn.BatchDisassociateScramSecret(&kafka.BatchDisassociateScramSecretInput{ + ClusterArn: aws.String(d.Get("cluster_arn").(string)), + SecretArnList: expandStringList(d.Get("secret_arn_list").([]interface{})), + }) + if err != nil { + return fmt.Errorf("error disassociating credentials with MSK cluster: %s", err) + } + + return nil +} + +func readSecrets(conn *kafka.Kafka, clusterArn string) ([]*string, error) { + input := &kafka.ListScramSecretsInput{ + ClusterArn: aws.String(clusterArn), + } + + var scramSecrets []*string + err := conn.ListScramSecretsPages(input, + func(page *kafka.ListScramSecretsOutput, lastPage bool) bool { + scramSecrets = append(scramSecrets, page.SecretArnList...) 
+ return !lastPage + }) + if err != nil { + return nil, err + } + + return scramSecrets, nil +} + +func associateSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchAssociateScramSecretOutput, error) { + batch := 10 + + output := &kafka.BatchAssociateScramSecretOutput{} + + for i := 0; i < len(secretArnList); i += batch { + end := i + batch + if end > len(secretArnList) { + end = len(secretArnList) + } + out, err := conn.BatchAssociateScramSecret(&kafka.BatchAssociateScramSecretInput{ + ClusterArn: aws.String(clusterArn), + SecretArnList: secretArnList[i:end], + }) + if err != nil { + return nil, err + } + for _, secret := range out.UnprocessedScramSecrets { + if secret.ErrorCode != nil { + output.UnprocessedScramSecrets = append(output.UnprocessedScramSecrets, secret) + } + } + } + return output, nil +} + +func filterExistingSecrets(existingSecrets, newSecrets []*string) []*string { + finalSecrets := []*string{} + for _, existingSecret := range existingSecrets { + if contains(newSecrets, existingSecret) { + finalSecrets = append(finalSecrets, existingSecret) + } + } + return finalSecrets +} + +func filterNewSecrets(existingSecrets, newSecrets []*string) []*string { + finalSecrets := []*string{} + for _, existingSecret := range existingSecrets { + if !contains(newSecrets, existingSecret) { + finalSecrets = append(finalSecrets, existingSecret) + } + } + return finalSecrets +} + +func filterSecretsForDeletion(newSecrets, existingSecrets []*string) ([]*string, []*string) { + var updateSecrets, deleteSecrets []*string + for _, existingSecret := range existingSecrets { + if !contains(newSecrets, existingSecret) { + deleteSecrets = append(deleteSecrets, existingSecret) + } + } + for _, newSecret := range newSecrets { + if !contains(existingSecrets, newSecret) { + updateSecrets = append(updateSecrets, newSecret) + } + } + return updateSecrets, deleteSecrets +} + +func contains(slice []*string, val *string) bool { + for _, item := range 
slice { + if *item == *val { + return true + } + } + return false +} diff --git a/aws/resource_aws_msk_scram_secret_test.go b/aws/resource_aws_msk_scram_secret_test.go new file mode 100644 index 00000000000..ee81e21cd9b --- /dev/null +++ b/aws/resource_aws_msk_scram_secret_test.go @@ -0,0 +1,361 @@ +package aws + +import ( + "fmt" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +var newSecrets = aws.StringSlice([]string{"AmazonMSK_test_aws_example_test_tf-test-2-jbuaA1", + "AmazonMSK_test_aws_example_test_tf-test-5-I50lRm"}) + +var existingSecrets = aws.StringSlice([]string{"AmazonMSK_test_aws_example_test_tf-test-2-jbuaA1", + "AmazonMSK_test_aws_example_test_tf-test-4-I50lRm"}) + +func TestAccAwsMskScramSecret_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_sasl_scram_secret.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskSaslScramSecret_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttrSet(resourceName, "scram_secrets.#"), + ), + }, + }, + }) +} +func TestAccAwsMskScramSecret_Delete(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_sasl_scram_secret.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskSaslScramSecret_basic(rName), + Check: 
resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "scram_secrets.#"), + ), + }, + { + Config: testAccMskSaslScramSecretDelete(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSaslScramSecretsDestruction(resourceName), + ), + }, + }, + }) +} + +func TestAccAwsMskScramSecret_UpdateRemove(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_sasl_scram_secret.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskSaslScramSecret_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "scram_secrets.#"), + ), + }, + { + Config: testAccMskSaslScramSecretUpdate(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSaslScramSecretsDontExist(resourceName, 2), + ), + }, + }, + }) +} + +func testAccCheckAwsSaslScramSecretsDontExist(resourceName string, count int) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if len(rs.Primary.Attributes["secret_arn_list"]) > count { + return fmt.Errorf("Too many secrets for %v", rs.Primary.Attributes["secret_arn_list"]) + } + + return nil + } +} + +func testAccCheckAwsSaslScramSecretsDestruction(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[resourceName] + if ok { + return fmt.Errorf("Should not have found: %s", resourceName) + } + + return nil + } +} + +func TestAwsMskFilterNewSecrets(t *testing.T) { + expected := []string{"AmazonMSK_test_aws_example_test_tf-test-5-I50lRm"} + + result := filterNewSecrets(newSecrets, existingSecrets) + if !reflect.DeepEqual(expected, 
aws.StringValueSlice(result)) { + t.Fatalf("Expected secret list to be %v, got %v", expected, aws.StringValueSlice(result)) + } +} + +func TestAwsMskFilterExistingSecrets(t *testing.T) { + expected := []string{"AmazonMSK_test_aws_example_test_tf-test-2-jbuaA1"} + + result := filterExistingSecrets(newSecrets, existingSecrets) + if !reflect.DeepEqual(expected, aws.StringValueSlice(result)) { + t.Fatalf("Expected secret list to be %v, got %v", expected, aws.StringValueSlice(result)) + } +} + +func TestAwsMskFilterDeletionSecrets(t *testing.T) { + expectedUpdate := []string{"AmazonMSK_test_aws_example_test_tf-test-5-I50lRm"} + expectedDelete := []string{"AmazonMSK_test_aws_example_test_tf-test-4-I50lRm"} + + updated, deleted := filterSecretsForDeletion(newSecrets, existingSecrets) + + if !reflect.DeepEqual(expectedUpdate, aws.StringValueSlice(updated)) { + t.Fatalf("Expected secret list to be %v, got %v", expectedUpdate, aws.StringValueSlice(updated)) + } + + if !reflect.DeepEqual(expectedDelete, aws.StringValueSlice(deleted)) { + t.Fatalf("Expected secret list to be %v, got %v", expectedDelete, aws.StringValueSlice(deleted)) + } +} + +func testAccMskSaslScramSecret_basic(rName string) string { + return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.5.1" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = [aws_subnet.example_subnet_az1.id, aws_subnet.example_subnet_az2.id, aws_subnet.example_subnet_az3.id] + ebs_volume_size = 10 + instance_type = "kafka.t3.small" + security_groups = [aws_security_group.example_sg.id] + } + + client_authentication { + sasl { + scram = true + } + } +} + +resource "aws_kms_key" "test" { + description = "%s-kms-key-msk" +} + +resource "aws_secretsmanager_secret" "test_basic" { + name = "AmazonMSK_test_%s_1_basic" + kms_key_id = aws_kms_key.test.key_id +} + +resource "aws_secretsmanager_secret_version" "test" { + secret_id = 
aws_secretsmanager_secret.test_basic.id + secret_string = jsonencode({ username = "%s", password = "foobar" }) +} + +resource "aws_msk_sasl_scram_secret" "test" { + cluster_arn = aws_msk_cluster.test.arn + secret_arn_list = [aws_secretsmanager_secret.test_basic.arn] +} + +resource "aws_secretsmanager_secret_policy" "test" { + secret_arn = aws_secretsmanager_secret.test_basic.arn + policy = < Date: Tue, 3 Nov 2020 13:48:34 +0000 Subject: [PATCH 2/6] lint tf --- aws/resource_aws_msk_scram_secret_test.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/aws/resource_aws_msk_scram_secret_test.go b/aws/resource_aws_msk_scram_secret_test.go index ee81e21cd9b..0fb5b20377a 100644 --- a/aws/resource_aws_msk_scram_secret_test.go +++ b/aws/resource_aws_msk_scram_secret_test.go @@ -167,7 +167,7 @@ resource "aws_msk_cluster" "test" { } resource "aws_kms_key" "test" { - description = "%s-kms-key-msk" + description = "%s-kms-key-msk" } resource "aws_secretsmanager_secret" "test_basic" { @@ -187,7 +187,7 @@ resource "aws_msk_sasl_scram_secret" "test" { resource "aws_secretsmanager_secret_policy" "test" { secret_arn = aws_secretsmanager_secret.test_basic.arn - policy = < Date: Wed, 11 Nov 2020 13:58:20 +0000 Subject: [PATCH 3/6] add conflicts and restructure tests --- aws/resource_aws_msk_cluster.go | 1 + aws/resource_aws_msk_scram_secret.go | 24 +++++----- aws/resource_aws_msk_scram_secret_test.go | 58 +++++++++++------------ 3 files changed, 42 insertions(+), 41 deletions(-) diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index 467a63427de..8ddedd741c1 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -121,6 +121,7 @@ func resourceAwsMskCluster() *schema.Resource { }, }, }, + ConflictsWith: []string{"client_authentication.0.tls"}, }, }, }, diff --git a/aws/resource_aws_msk_scram_secret.go b/aws/resource_aws_msk_scram_secret.go index 44e11440a22..c6741d1cbe2 100644 --- 
a/aws/resource_aws_msk_scram_secret.go +++ b/aws/resource_aws_msk_scram_secret.go @@ -43,14 +43,14 @@ func resourceAwsMskScramSecret() *schema.Resource { func resourceAwsMskScramSecretCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).kafkaconn - existingSecrets, err := readSecrets(conn, d.Get("cluster_arn").(string)) + existingSecrets, err := readMSKClusterSecrets(conn, d.Get("cluster_arn").(string)) if err != nil { return fmt.Errorf("failed lookup secrets %s", err) } - createSecrets := filterNewSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) + createSecrets := filterMskNewSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) - out, err := associateSecrets(conn, d.Get("cluster_arn").(string), createSecrets) + out, err := associateMSKClusterSecrets(conn, d.Get("cluster_arn").(string), createSecrets) if err != nil { return fmt.Errorf("error associating credentials with MSK cluster: %s", err) } @@ -66,12 +66,12 @@ func resourceAwsMskScramSecretCreate(d *schema.ResourceData, meta interface{}) e func resourceAwsMskScramSecretRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).kafkaconn - scramSecrets, err := readSecrets(conn, d.Get("cluster_arn").(string)) + scramSecrets, err := readMSKClusterSecrets(conn, d.Get("cluster_arn").(string)) if err != nil { return fmt.Errorf("failed lookup secrets %s", err) } - allSecrets := filterExistingSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), scramSecrets) + allSecrets := filterMskExistingSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), scramSecrets) d.SetId(d.Get("cluster_arn").(string)) d.Set("arn", d.Get("cluster_arn").(string)) @@ -85,9 +85,9 @@ func resourceAwsMskScramSecretUpdate(d *schema.ResourceData, meta interface{}) e existingSecrets := expandStringList(d.Get("scram_secrets").([]interface{})) - updateSecrets, deleteSecrets := 
filterSecretsForDeletion(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) + updateSecrets, deleteSecrets := filterMskSecretsForDeletion(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) - out, err := associateSecrets(conn, d.Get("cluster_arn").(string), updateSecrets) + out, err := associateMSKClusterSecrets(conn, d.Get("cluster_arn").(string), updateSecrets) if err != nil { return fmt.Errorf("error associating credentials with MSK cluster: %s", err) } @@ -127,7 +127,7 @@ func resourceAwsMskScramSecretDelete(d *schema.ResourceData, meta interface{}) e return nil } -func readSecrets(conn *kafka.Kafka, clusterArn string) ([]*string, error) { +func readMSKClusterSecrets(conn *kafka.Kafka, clusterArn string) ([]*string, error) { input := &kafka.ListScramSecretsInput{ ClusterArn: aws.String(clusterArn), } @@ -145,7 +145,7 @@ func readSecrets(conn *kafka.Kafka, clusterArn string) ([]*string, error) { return scramSecrets, nil } -func associateSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchAssociateScramSecretOutput, error) { +func associateMSKClusterSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchAssociateScramSecretOutput, error) { batch := 10 output := &kafka.BatchAssociateScramSecretOutput{} @@ -171,7 +171,7 @@ func associateSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*str return output, nil } -func filterExistingSecrets(existingSecrets, newSecrets []*string) []*string { +func filterMskExistingSecrets(existingSecrets, newSecrets []*string) []*string { finalSecrets := []*string{} for _, existingSecret := range existingSecrets { if contains(newSecrets, existingSecret) { @@ -181,7 +181,7 @@ func filterExistingSecrets(existingSecrets, newSecrets []*string) []*string { return finalSecrets } -func filterNewSecrets(existingSecrets, newSecrets []*string) []*string { +func filterMskNewSecrets(existingSecrets, newSecrets []*string) 
[]*string { finalSecrets := []*string{} for _, existingSecret := range existingSecrets { if !contains(newSecrets, existingSecret) { @@ -191,7 +191,7 @@ func filterNewSecrets(existingSecrets, newSecrets []*string) []*string { return finalSecrets } -func filterSecretsForDeletion(newSecrets, existingSecrets []*string) ([]*string, []*string) { +func filterMskSecretsForDeletion(newSecrets, existingSecrets []*string) ([]*string, []*string) { var updateSecrets, deleteSecrets []*string for _, existingSecret := range existingSecrets { if !contains(newSecrets, existingSecret) { diff --git a/aws/resource_aws_msk_scram_secret_test.go b/aws/resource_aws_msk_scram_secret_test.go index 0fb5b20377a..d6c46736c44 100644 --- a/aws/resource_aws_msk_scram_secret_test.go +++ b/aws/resource_aws_msk_scram_secret_test.go @@ -86,36 +86,10 @@ func TestAccAwsMskScramSecret_UpdateRemove(t *testing.T) { }) } -func testAccCheckAwsSaslScramSecretsDontExist(resourceName string, count int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Not found: %s", resourceName) - } - - if len(rs.Primary.Attributes["secret_arn_list"]) > count { - return fmt.Errorf("Too many secrets for %v", rs.Primary.Attributes["secret_arn_list"]) - } - - return nil - } -} - -func testAccCheckAwsSaslScramSecretsDestruction(resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[resourceName] - if ok { - return fmt.Errorf("Should not have found: %s", resourceName) - } - - return nil - } -} - func TestAwsMskFilterNewSecrets(t *testing.T) { expected := []string{"AmazonMSK_test_aws_example_test_tf-test-5-I50lRm"} - result := filterNewSecrets(newSecrets, existingSecrets) + result := filterMskNewSecrets(newSecrets, existingSecrets) if !reflect.DeepEqual(expected, aws.StringValueSlice(result)) { t.Fatalf("Expected secret list to be %v, got %v", expected, 
aws.StringValueSlice(result)) } @@ -124,7 +98,7 @@ func TestAwsMskFilterNewSecrets(t *testing.T) { func TestAwsMskFilterExistingSecrets(t *testing.T) { expected := []string{"AmazonMSK_test_aws_example_test_tf-test-2-jbuaA1"} - result := filterExistingSecrets(newSecrets, existingSecrets) + result := filterMskExistingSecrets(newSecrets, existingSecrets) if !reflect.DeepEqual(expected, aws.StringValueSlice(result)) { t.Fatalf("Expected secret list to be %v, got %v", expected, aws.StringValueSlice(result)) } @@ -134,7 +108,7 @@ func TestAwsMskFilterDeletionSecrets(t *testing.T) { expectedUpdate := []string{"AmazonMSK_test_aws_example_test_tf-test-5-I50lRm"} expectedDelete := []string{"AmazonMSK_test_aws_example_test_tf-test-4-I50lRm"} - updated, deleted := filterSecretsForDeletion(newSecrets, existingSecrets) + updated, deleted := filterMskSecretsForDeletion(newSecrets, existingSecrets) if !reflect.DeepEqual(expectedUpdate, aws.StringValueSlice(updated)) { t.Fatalf("Expected secret list to be %v, got %v", expectedUpdate, aws.StringValueSlice(updated)) @@ -145,6 +119,32 @@ func TestAwsMskFilterDeletionSecrets(t *testing.T) { } } +func testAccCheckAwsSaslScramSecretsDontExist(resourceName string, count int) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if len(rs.Primary.Attributes["secret_arn_list"]) > count { + return fmt.Errorf("Too many secrets for %v", rs.Primary.Attributes["secret_arn_list"]) + } + + return nil + } +} + +func testAccCheckAwsSaslScramSecretsDestruction(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[resourceName] + if ok { + return fmt.Errorf("Should not have found: %s", resourceName) + } + + return nil + } +} + func testAccMskSaslScramSecret_basic(rName string) string { return testAccMskClusterBaseConfig() + fmt.Sprintf(` resource 
"aws_msk_cluster" "test" { From 5c9260870b3eccfad6b48c957ad5e24bb8b90138 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 18 Nov 2020 00:37:57 -0500 Subject: [PATCH 4/6] CR updates and refactor --- aws/internal/service/msk/finder/finder.go | 24 ++ aws/provider.go | 2 +- aws/resource_aws_msk_cluster.go | 10 +- aws/resource_aws_msk_cluster_test.go | 19 +- aws/resource_aws_msk_scram_secret.go | 216 ----------- ...source_aws_msk_scram_secret_association.go | 203 ++++++++++ ...e_aws_msk_scram_secret_association_test.go | 247 ++++++++++++ aws/resource_aws_msk_scram_secret_test.go | 359 ------------------ website/docs/r/msk_sasl_scram_secret.markdown | 90 ----- ...msk_scram_secret_association.html.markdown | 75 ++++ 10 files changed, 569 insertions(+), 676 deletions(-) create mode 100644 aws/internal/service/msk/finder/finder.go delete mode 100644 aws/resource_aws_msk_scram_secret.go create mode 100644 aws/resource_aws_msk_scram_secret_association.go create mode 100644 aws/resource_aws_msk_scram_secret_association_test.go delete mode 100644 aws/resource_aws_msk_scram_secret_test.go delete mode 100644 website/docs/r/msk_sasl_scram_secret.markdown create mode 100644 website/docs/r/msk_scram_secret_association.html.markdown diff --git a/aws/internal/service/msk/finder/finder.go b/aws/internal/service/msk/finder/finder.go new file mode 100644 index 00000000000..45da995c0cf --- /dev/null +++ b/aws/internal/service/msk/finder/finder.go @@ -0,0 +1,24 @@ +package finder + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" +) + +// ScramSecrets returns the matching MSK Cluster's associated secrets +func ScramSecrets(conn *kafka.Kafka, clusterArn string) ([]*string, error) { + input := &kafka.ListScramSecretsInput{ + ClusterArn: aws.String(clusterArn), + } + + var scramSecrets []*string + err := conn.ListScramSecretsPages(input, func(page *kafka.ListScramSecretsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + 
scramSecrets = append(scramSecrets, page.SecretArnList...) + return !lastPage + }) + + return scramSecrets, err +} diff --git a/aws/provider.go b/aws/provider.go index e5e2df44f53..014daf68a5b 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -758,7 +758,7 @@ func Provider() *schema.Provider { "aws_media_store_container_policy": resourceAwsMediaStoreContainerPolicy(), "aws_msk_cluster": resourceAwsMskCluster(), "aws_msk_configuration": resourceAwsMskConfiguration(), - "aws_msk_sasl_scram_secret": resourceAwsMskScramSecret(), + "aws_msk_scram_secret_association": resourceAwsMskScramSecretAssociation(), "aws_nat_gateway": resourceAwsNatGateway(), "aws_network_acl": resourceAwsNetworkAcl(), "aws_default_network_acl": resourceAwsDefaultNetworkAcl(), diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index 8ddedd741c1..d87ddbde162 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -113,11 +113,14 @@ func resourceAwsMskCluster() *schema.Resource { "sasl": { Type: schema.TypeList, Optional: true, + ForceNew: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "scram": { Type: schema.TypeBool, Optional: true, + ForceNew: true, }, }, }, @@ -723,11 +726,14 @@ func expandMskClusterScram(l []interface{}) *kafka.Sasl { return nil } - m := l[0].(map[string]interface{}) + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } sasl := &kafka.Sasl{ Scram: &kafka.Scram{ - Enabled: aws.Bool(m["scram"].(bool)), + Enabled: aws.Bool(tfMap["scram"].(bool)), }, } diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index 1e8e35b3df8..184ae7f4645 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -200,7 +200,8 @@ func TestAccAWSMskCluster_ClientAuthentication_Sasl_Scram(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster1), 
resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "1"), - resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.0.scram", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.0.scram", "true"), ), }, { @@ -825,23 +826,25 @@ resource "aws_msk_cluster" "test" { func testAccMskClusterConfigClientAuthenticationSaslScram(rName string) string { return testAccMskClusterBaseConfig() + fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q +} + resource "aws_secretsmanager_secret" "test" { - name = "AmazonMSK_test" + name = "AmazonMSK_%[1]q" kms_key_id = aws_kms_key.test.key_id } resource "aws_secretsmanager_secret_version" "test" { secret_id = aws_secretsmanager_secret.test.id - secret_string = jsonencode({ username = "%s", password = "foobar" }) + secret_string = jsonencode({ username = "user", password = "pass" }) } -resource "aws_kms_key" "test" { - description = "test" -} - -resource "aws_msk_sasl_scram_secret" "test" { +resource "aws_msk_scram_secret_association" "test" { cluster_arn = aws_msk_cluster.test.arn secret_arn_list = [aws_secretsmanager_secret.test.arn] + + depends_on = [aws_secretsmanager_secret_version.test] } resource "aws_msk_cluster" "test" { diff --git a/aws/resource_aws_msk_scram_secret.go b/aws/resource_aws_msk_scram_secret.go deleted file mode 100644 index c6741d1cbe2..00000000000 --- a/aws/resource_aws_msk_scram_secret.go +++ /dev/null @@ -1,216 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func resourceAwsMskScramSecret() *schema.Resource { - return &schema.Resource{ - Create: resourceAwsMskScramSecretCreate, - Read: resourceAwsMskScramSecretRead, - Update: resourceAwsMskScramSecretUpdate, - Delete: resourceAwsMskScramSecretDelete, - 
Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, - Schema: map[string]*schema.Schema{ - "cluster_arn": { - Type: schema.TypeString, - Required: true, - }, - "secret_arn_list": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "scram_secrets": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceAwsMskScramSecretCreate(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kafkaconn - - existingSecrets, err := readMSKClusterSecrets(conn, d.Get("cluster_arn").(string)) - if err != nil { - return fmt.Errorf("failed lookup secrets %s", err) - } - - createSecrets := filterMskNewSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) - - out, err := associateMSKClusterSecrets(conn, d.Get("cluster_arn").(string), createSecrets) - if err != nil { - return fmt.Errorf("error associating credentials with MSK cluster: %s", err) - } - d.SetId(d.Get("cluster_arn").(string)) - - if len(out.UnprocessedScramSecrets) != 0 { - return fmt.Errorf("there were unprocessed secrets during association: %s", out.UnprocessedScramSecrets) - } - - return resourceAwsMskScramSecretRead(d, meta) -} - -func resourceAwsMskScramSecretRead(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kafkaconn - - scramSecrets, err := readMSKClusterSecrets(conn, d.Get("cluster_arn").(string)) - if err != nil { - return fmt.Errorf("failed lookup secrets %s", err) - } - - allSecrets := filterMskExistingSecrets(expandStringList(d.Get("secret_arn_list").([]interface{})), scramSecrets) - - d.SetId(d.Get("cluster_arn").(string)) - d.Set("arn", d.Get("cluster_arn").(string)) - d.Set("scram_secrets", aws.StringValueSlice(allSecrets)) - - return nil -} - -func resourceAwsMskScramSecretUpdate(d *schema.ResourceData, meta 
interface{}) error { - conn := meta.(*AWSClient).kafkaconn - - existingSecrets := expandStringList(d.Get("scram_secrets").([]interface{})) - - updateSecrets, deleteSecrets := filterMskSecretsForDeletion(expandStringList(d.Get("secret_arn_list").([]interface{})), existingSecrets) - - out, err := associateMSKClusterSecrets(conn, d.Get("cluster_arn").(string), updateSecrets) - if err != nil { - return fmt.Errorf("error associating credentials with MSK cluster: %s", err) - } - - if len(out.UnprocessedScramSecrets) != 0 { - return fmt.Errorf("there were unprocessed secrets during association: %s", out.UnprocessedScramSecrets) - } - - if len(deleteSecrets) > 0 { - deleteOutput, err := conn.BatchDisassociateScramSecret(&kafka.BatchDisassociateScramSecretInput{ - ClusterArn: aws.String(d.Get("cluster_arn").(string)), - SecretArnList: deleteSecrets, - }) - if err != nil { - return fmt.Errorf("error disassociating credentials with MSK cluster: %s", err) - } - - if len(deleteOutput.UnprocessedScramSecrets) != 0 { - return fmt.Errorf("there were unprocessed secrets during association: %s", out.UnprocessedScramSecrets) - } - } - - return resourceAwsMskScramSecretRead(d, meta) -} - -func resourceAwsMskScramSecretDelete(d *schema.ResourceData, meta interface{}) error { - conn := meta.(*AWSClient).kafkaconn - - _, err := conn.BatchDisassociateScramSecret(&kafka.BatchDisassociateScramSecretInput{ - ClusterArn: aws.String(d.Get("cluster_arn").(string)), - SecretArnList: expandStringList(d.Get("secret_arn_list").([]interface{})), - }) - if err != nil { - return fmt.Errorf("error disassociating credentials with MSK cluster: %s", err) - } - - return nil -} - -func readMSKClusterSecrets(conn *kafka.Kafka, clusterArn string) ([]*string, error) { - input := &kafka.ListScramSecretsInput{ - ClusterArn: aws.String(clusterArn), - } - - var scramSecrets []*string - err := conn.ListScramSecretsPages(input, - func(page *kafka.ListScramSecretsOutput, lastPage bool) bool { - scramSecrets = 
append(scramSecrets, page.SecretArnList...) - return !lastPage - }) - if err != nil { - return nil, err - } - - return scramSecrets, nil -} - -func associateMSKClusterSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchAssociateScramSecretOutput, error) { - batch := 10 - - output := &kafka.BatchAssociateScramSecretOutput{} - - for i := 0; i < len(secretArnList); i += batch { - end := i + batch - if end > len(secretArnList) { - end = len(secretArnList) - } - out, err := conn.BatchAssociateScramSecret(&kafka.BatchAssociateScramSecretInput{ - ClusterArn: aws.String(clusterArn), - SecretArnList: secretArnList[i:end], - }) - if err != nil { - return nil, err - } - for _, secret := range out.UnprocessedScramSecrets { - if secret.ErrorCode != nil { - output.UnprocessedScramSecrets = append(output.UnprocessedScramSecrets, secret) - } - } - } - return output, nil -} - -func filterMskExistingSecrets(existingSecrets, newSecrets []*string) []*string { - finalSecrets := []*string{} - for _, existingSecret := range existingSecrets { - if contains(newSecrets, existingSecret) { - finalSecrets = append(finalSecrets, existingSecret) - } - } - return finalSecrets -} - -func filterMskNewSecrets(existingSecrets, newSecrets []*string) []*string { - finalSecrets := []*string{} - for _, existingSecret := range existingSecrets { - if !contains(newSecrets, existingSecret) { - finalSecrets = append(finalSecrets, existingSecret) - } - } - return finalSecrets -} - -func filterMskSecretsForDeletion(newSecrets, existingSecrets []*string) ([]*string, []*string) { - var updateSecrets, deleteSecrets []*string - for _, existingSecret := range existingSecrets { - if !contains(newSecrets, existingSecret) { - deleteSecrets = append(deleteSecrets, existingSecret) - } - } - for _, newSecret := range newSecrets { - if !contains(existingSecrets, newSecret) { - updateSecrets = append(updateSecrets, newSecret) - } - } - return updateSecrets, deleteSecrets -} - -func 
contains(slice []*string, val *string) bool { - for _, item := range slice { - if *item == *val { - return true - } - } - return false -} diff --git a/aws/resource_aws_msk_scram_secret_association.go b/aws/resource_aws_msk_scram_secret_association.go new file mode 100644 index 00000000000..1e60ff4defb --- /dev/null +++ b/aws/resource_aws_msk_scram_secret_association.go @@ -0,0 +1,203 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/msk/finder" +) + +const ScramSecretBatchSize = 10 + +func resourceAwsMskScramSecretAssociation() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsMskScramSecretAssociationCreate, + Read: resourceAwsMskScramSecretAssociationRead, + Update: resourceAwsMskScramSecretAssociationUpdate, + Delete: resourceAwsMskScramSecretAssociationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "cluster_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "secret_arn_list": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, + }, + }, + } +} + +func resourceAwsMskScramSecretAssociationCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + clusterArn := d.Get("cluster_arn").(string) + secretArnList := expandStringSet(d.Get("secret_arn_list").(*schema.Set)) + + output, err := associateMSKClusterSecrets(conn, clusterArn, secretArnList) + if err != nil { + return fmt.Errorf("error associating scram secret(s) to MSK cluster (%s): %w", clusterArn, err) + } + + 
d.SetId(aws.StringValue(output.ClusterArn)) + + if len(output.UnprocessedScramSecrets) != 0 { + return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + } + + return resourceAwsMskScramSecretAssociationRead(d, meta) +} + +func resourceAwsMskScramSecretAssociationRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + secretArnList, err := finder.ScramSecrets(conn, d.Id()) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { + log.Printf("[WARN] Scram secret(s) for MSK cluster (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("error reading MSK cluster (%s) scram secret(s): %w", d.Id(), err) + } + + d.Set("cluster_arn", d.Id()) + if err := d.Set("secret_arn_list", flattenStringSet(secretArnList)); err != nil { + return fmt.Errorf("error setting secret_arn_list: %w", err) + } + + return nil +} + +func resourceAwsMskScramSecretAssociationUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + o, n := d.GetChange("secret_arn_list") + oldSet, newSet := o.(*schema.Set), n.(*schema.Set) + + if newSet.Len() > 0 { + if newSecrets := newSet.Difference(oldSet); newSecrets.Len() > 0 { + output, err := associateMSKClusterSecrets(conn, d.Id(), expandStringSet(newSecrets)) + if err != nil { + return fmt.Errorf("error associating scram secret(s) with MSK cluster (%s): %w", d.Id(), err) + } + + if len(output.UnprocessedScramSecrets) != 0 { + return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + } + } + } + + if oldSet.Len() > 0 { + if deleteSecrets := oldSet.Difference(newSet); deleteSecrets.Len() > 0 { + output, err := disassociateMSKClusterSecrets(conn, d.Id(), expandStringSet(deleteSecrets)) + if err != nil { + return fmt.Errorf("error disassociating scram secret(s) from MSK cluster (%s): %w", d.Id(), err) + } + + if 
len(output.UnprocessedScramSecrets) != 0 { + return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + } + } + } + + return resourceAwsMskScramSecretAssociationRead(d, meta) +} + +func resourceAwsMskScramSecretAssociationDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).kafkaconn + + secretArnList, err := finder.ScramSecrets(conn, d.Id()) + + if err != nil { + if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { + return nil + } + return fmt.Errorf("error reading scram secret(s) for MSK cluster (%s): %w", d.Id(), err) + } + + if len(secretArnList) > 0 { + output, err := disassociateMSKClusterSecrets(conn, d.Id(), secretArnList) + if err != nil { + if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { + return nil + } + return fmt.Errorf("error disassociating scram secret(s) from MSK cluster (%s): %w", d.Id(), err) + } + if len(output.UnprocessedScramSecrets) != 0 { + return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + } + } + + return nil +} + +func associateMSKClusterSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchAssociateScramSecretOutput, error) { + output := &kafka.BatchAssociateScramSecretOutput{} + + for i := 0; i < len(secretArnList); i += ScramSecretBatchSize { + end := i + ScramSecretBatchSize + if end > len(secretArnList) { + end = len(secretArnList) + } + + resp, err := conn.BatchAssociateScramSecret(&kafka.BatchAssociateScramSecretInput{ + ClusterArn: aws.String(clusterArn), + SecretArnList: secretArnList[i:end], + }) + if err != nil { + return nil, err + } + + output.ClusterArn = resp.ClusterArn + output.UnprocessedScramSecrets = append(output.UnprocessedScramSecrets, resp.UnprocessedScramSecrets...) 
+ } + return output, nil +} + +func disassociateMSKClusterSecrets(conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchDisassociateScramSecretOutput, error) { + output := &kafka.BatchDisassociateScramSecretOutput{} + + for i := 0; i < len(secretArnList); i += ScramSecretBatchSize { + end := i + ScramSecretBatchSize + if end > len(secretArnList) { + end = len(secretArnList) + } + + resp, err := conn.BatchDisassociateScramSecret(&kafka.BatchDisassociateScramSecretInput{ + ClusterArn: aws.String(clusterArn), + SecretArnList: secretArnList[i:end], + }) + if err != nil { + return nil, err + } + + output.ClusterArn = resp.ClusterArn + output.UnprocessedScramSecrets = append(output.UnprocessedScramSecrets, resp.UnprocessedScramSecrets...) + } + return output, nil +} + +func flattenUnprocessedScramSecrets(clusterArn *string, secrets []*kafka.UnprocessedScramSecret) error { + var errors *multierror.Error + for _, s := range secrets { + secretArn, errMsg := aws.StringValue(s.SecretArn), aws.StringValue(s.ErrorMessage) + errors = multierror.Append(errors, fmt.Errorf("error associating MSK cluster (%s) with scram secret (%s): %s", aws.StringValue(clusterArn), secretArn, errMsg)) + } + return errors.ErrorOrNil() +} diff --git a/aws/resource_aws_msk_scram_secret_association_test.go b/aws/resource_aws_msk_scram_secret_association_test.go new file mode 100644 index 00000000000..e2ceb357c8b --- /dev/null +++ b/aws/resource_aws_msk_scram_secret_association_test.go @@ -0,0 +1,247 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafka" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/msk/finder" + 
"github.com/terraform-providers/terraform-provider-aws/aws/internal/tfawsresource" +) + +func TestAccAwsMskScramSecretAssociation_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_scram_secret_association.test" + clusterResourceName := "aws_msk_cluster.test" + secretResourceName := "aws_secretsmanager_secret.test.0" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskScramSecretAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskScramSecretAssociation_basic(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskScramSecretAssociationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "cluster_arn", clusterResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "secret_arn_list.#", "1"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "secret_arn_list.*", secretResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsMskScramSecretAssociation_update(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_scram_secret_association.test" + secretResourceName := "aws_secretsmanager_secret.test.0" + secretResourceName2 := "aws_secretsmanager_secret.test.1" + secretResourceName3 := "aws_secretsmanager_secret.test.2" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskScramSecretAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskScramSecretAssociation_basic(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskScramSecretAssociationExists(resourceName), + ), + }, + { + Config: testAccMskScramSecretAssociation_basic(rName, 3), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckMskScramSecretAssociationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "secret_arn_list.#", "3"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "secret_arn_list.*", secretResourceName, "arn"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "secret_arn_list.*", secretResourceName2, "arn"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "secret_arn_list.*", secretResourceName3, "arn"), + ), + }, + { + Config: testAccMskScramSecretAssociation_basic(rName, 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskScramSecretAssociationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "secret_arn_list.#", "2"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "secret_arn_list.*", secretResourceName, "arn"), + tfawsresource.TestCheckTypeSetElemAttrPair(resourceName, "secret_arn_list.*", secretResourceName2, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAwsMskScramSecretAssociation_disappears(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_msk_scram_secret_association.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskScramSecretAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskScramSecretAssociation_basic(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskScramSecretAssociationExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsMskScramSecretAssociation(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAwsMskScramSecretAssociation_clusterDisappears(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := 
"aws_msk_scram_secret_association.test" + clusterResourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSMsk(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckMskScramSecretAssociationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMskScramSecretAssociation_basic(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskScramSecretAssociationExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsMskCluster(), clusterResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckMskScramSecretAssociationDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_msk_scram_secret_association" { + continue + } + + conn := testAccProvider.Meta().(*AWSClient).kafkaconn + input := &kafka.ListScramSecretsInput{ + ClusterArn: aws.String(rs.Primary.ID), + } + + _, err := conn.ListScramSecrets(input) + if err != nil { + if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { + continue + } + return err + } + } + return nil +} + +func testAccCheckMskScramSecretAssociationExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set for %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).kafkaconn + _, err := finder.ScramSecrets(conn, rs.Primary.ID) + if err != nil { + return err + } + return nil + } +} + +func testAccMskScramSecretAssociationBaseConfig(rName string, count int) string { + return fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.5.1" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = [aws_subnet.example_subnet_az1.id, 
aws_subnet.example_subnet_az2.id, aws_subnet.example_subnet_az3.id] + ebs_volume_size = 10 + instance_type = "kafka.t3.small" + security_groups = [aws_security_group.example_sg.id] + } + + client_authentication { + sasl { + scram = true + } + } +} + +resource "aws_kms_key" "test" { + count = %[2]d + description = "%[1]s-${count.index+1}" +} + +resource "aws_secretsmanager_secret" "test" { + count = %[2]d + name = "AmazonMSK_%[1]s-${count.index+1}" + kms_key_id = aws_kms_key.test[count.index].id +} + +resource "aws_secretsmanager_secret_version" "test" { + count = %[2]d + secret_id = aws_secretsmanager_secret.test[count.index].id + secret_string = jsonencode({ username = "user", password = "pass" }) +} + +resource "aws_secretsmanager_secret_policy" "test" { + count = %[2]d + secret_arn = aws_secretsmanager_secret.test[count.index].arn + policy = < count { - return fmt.Errorf("Too many secrets for %v", rs.Primary.Attributes["secret_arn_list"]) - } - - return nil - } -} - -func testAccCheckAwsSaslScramSecretsDestruction(resourceName string) resource.TestCheckFunc { - return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[resourceName] - if ok { - return fmt.Errorf("Should not have found: %s", resourceName) - } - - return nil - } -} - -func testAccMskSaslScramSecret_basic(rName string) string { - return testAccMskClusterBaseConfig() + fmt.Sprintf(` -resource "aws_msk_cluster" "test" { - cluster_name = %[1]q - kafka_version = "2.5.1" - number_of_broker_nodes = 3 - - broker_node_group_info { - client_subnets = [aws_subnet.example_subnet_az1.id, aws_subnet.example_subnet_az2.id, aws_subnet.example_subnet_az3.id] - ebs_volume_size = 10 - instance_type = "kafka.t3.small" - security_groups = [aws_security_group.example_sg.id] - } - - client_authentication { - sasl { - scram = true - } - } -} - -resource "aws_kms_key" "test" { - description = "%s-kms-key-msk" -} - -resource "aws_secretsmanager_secret" "test_basic" { - name = "AmazonMSK_test_%s_1_basic" - 
kms_key_id = aws_kms_key.test.key_id -} - -resource "aws_secretsmanager_secret_version" "test" { - secret_id = aws_secretsmanager_secret.test_basic.id - secret_string = jsonencode({ username = "%s", password = "foobar" }) -} - -resource "aws_msk_sasl_scram_secret" "test" { - cluster_arn = aws_msk_cluster.test.arn - secret_arn_list = [aws_secretsmanager_secret.test_basic.arn] -} - -resource "aws_secretsmanager_secret_policy" "test" { - secret_arn = aws_secretsmanager_secret.test_basic.arn - policy = < Date: Wed, 18 Nov 2020 03:41:13 -0500 Subject: [PATCH 5/6] fix linting errors --- aws/resource_aws_msk_cluster_test.go | 4 ++-- aws/resource_aws_msk_scram_secret_association_test.go | 6 +++--- website/docs/r/msk_cluster.html.markdown | 10 +++++----- .../docs/r/msk_scram_secret_association.html.markdown | 10 +++++----- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index 184ae7f4645..19098c7436a 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -831,7 +831,7 @@ resource "aws_kms_key" "test" { } resource "aws_secretsmanager_secret" "test" { - name = "AmazonMSK_%[1]q" + name = "AmazonMSK_%[1]s" kms_key_id = aws_kms_key.test.key_id } @@ -844,7 +844,7 @@ resource "aws_msk_scram_secret_association" "test" { cluster_arn = aws_msk_cluster.test.arn secret_arn_list = [aws_secretsmanager_secret.test.arn] - depends_on = [aws_secretsmanager_secret_version.test] + depends_on = [aws_secretsmanager_secret_version.test] } resource "aws_msk_cluster" "test" { diff --git a/aws/resource_aws_msk_scram_secret_association_test.go b/aws/resource_aws_msk_scram_secret_association_test.go index e2ceb357c8b..23e36cb5f31 100644 --- a/aws/resource_aws_msk_scram_secret_association_test.go +++ b/aws/resource_aws_msk_scram_secret_association_test.go @@ -197,12 +197,12 @@ resource "aws_msk_cluster" "test" { resource "aws_kms_key" "test" { count = %[2]d - description 
= "%[1]s-${count.index+1}" + description = "%[1]s-${count.index + 1}" } resource "aws_secretsmanager_secret" "test" { count = %[2]d - name = "AmazonMSK_%[1]s-${count.index+1}" + name = "AmazonMSK_%[1]s-${count.index + 1}" kms_key_id = aws_kms_key.test[count.index].id } @@ -241,7 +241,7 @@ resource "aws_msk_scram_secret_association" "test" { cluster_arn = aws_msk_cluster.test.arn secret_arn_list = aws_secretsmanager_secret.test[*].arn - depends_on = [aws_secretsmanager_secret_version.test] + depends_on = [aws_secretsmanager_secret_version.test] } `) } diff --git a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index d214a507cd3..32acca45322 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -186,17 +186,17 @@ The following arguments are supported: ### client_authentication Argument Reference -* `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. * `sasl` - (Optional) Configuration block for specifying Sasl client authentication. See below. - -#### client_authentication tls Argument Reference - -* `certificate_authority_arns` - (Optional) List of ACM Certificate Authority Amazon Resource Names (ARNs). +* `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. #### client_authentication sasl Argument Reference * `scram` - (Optional) Enables scram client authentication via AWS Secrets Manager. Defaults to `false`. +#### client_authentication tls Argument Reference + +* `certificate_authority_arns` - (Optional) List of ACM Certificate Authority Amazon Resource Names (ARNs). + ### configuration_info Argument Reference * `arn` - (Required) Amazon Resource Name (ARN) of the MSK Configuration to use in the cluster. 
diff --git a/website/docs/r/msk_scram_secret_association.html.markdown b/website/docs/r/msk_scram_secret_association.html.markdown index 095011a8bfb..a2799903930 100644 --- a/website/docs/r/msk_scram_secret_association.html.markdown +++ b/website/docs/r/msk_scram_secret_association.html.markdown @@ -19,8 +19,8 @@ resource "aws_msk_scram_secret_association" "example" { } resource "aws_msk_cluster" "example" { - cluster_name = "example" - kafka_version = "2.4.1" + cluster_name = "example" + kafka_version = "2.4.1" # ... other configuration... client_authentication { sasl { @@ -30,7 +30,7 @@ resource "aws_msk_cluster" "example" { } resource "aws_secretsmanager_secret" "example" { - name = "AmazonMSK_example" + name = "AmazonMSK_example" } resource "aws_secretsmanager_secret_policy" "msk" { @@ -66,10 +66,10 @@ In addition to all arguments above, the following attributes are exported: * `id` - Amazon Resource Name (ARN) of the MSK cluster. -## Import +## Import MSK Scram Secret Associations can be imported using the `id` e.g. 
``` $ terraform import aws_msk_scram_secret_association.example arn:aws:kafka:us-west-2:123456789012:cluster/example/279c0212-d057-4dba-9aa9-1c4e5a25bfc7-3 -``` \ No newline at end of file +``` From cd3158fb57976e12dcebeade9647405ed351a0c5 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 25 Nov 2020 07:20:11 -0500 Subject: [PATCH 6/6] test and doc updates --- aws/data_source_aws_msk_cluster.go | 6 +- aws/data_source_aws_msk_cluster_test.go | 1 + aws/resource_aws_msk_cluster.go | 80 ++++++++++--------- aws/resource_aws_msk_cluster_test.go | 79 +++++++++--------- ...source_aws_msk_scram_secret_association.go | 20 +++-- ...e_aws_msk_scram_secret_association_test.go | 4 +- website/docs/d/msk_cluster.html.markdown | 2 +- website/docs/r/msk_cluster.html.markdown | 6 +- ...msk_scram_secret_association.html.markdown | 42 +++++++--- 9 files changed, 136 insertions(+), 104 deletions(-) diff --git a/aws/data_source_aws_msk_cluster.go b/aws/data_source_aws_msk_cluster.go index e2c798e4152..dca1ce7ded1 100644 --- a/aws/data_source_aws_msk_cluster.go +++ b/aws/data_source_aws_msk_cluster.go @@ -23,11 +23,11 @@ func dataSourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "bootstrap_brokers_tls": { + "bootstrap_brokers_sasl_scram": { Type: schema.TypeString, Computed: true, }, - "bootstrap_brokers_sasl_scram": { + "bootstrap_brokers_tls": { Type: schema.TypeString, Computed: true, }, @@ -104,8 +104,8 @@ func dataSourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error d.Set("arn", aws.StringValue(cluster.ClusterArn)) d.Set("bootstrap_brokers", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerString)) - d.Set("bootstrap_brokers_tls", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerStringTls)) d.Set("bootstrap_brokers_sasl_scram", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerStringSaslScram)) + d.Set("bootstrap_brokers_tls", aws.StringValue(bootstrapBrokersoOutput.BootstrapBrokerStringTls)) 
d.Set("cluster_name", aws.StringValue(cluster.ClusterName)) d.Set("kafka_version", aws.StringValue(cluster.CurrentBrokerSoftwareInfo.KafkaVersion)) d.Set("number_of_broker_nodes", aws.Int64Value(cluster.NumberOfBrokerNodes)) diff --git a/aws/data_source_aws_msk_cluster_test.go b/aws/data_source_aws_msk_cluster_test.go index 5659d058c60..cb20092a374 100644 --- a/aws/data_source_aws_msk_cluster_test.go +++ b/aws/data_source_aws_msk_cluster_test.go @@ -24,6 +24,7 @@ func TestAccAWSMskClusterDataSource_Name(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(resourceName, "arn", dataSourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "bootstrap_brokers", ""), + resource.TestCheckResourceAttrPair(resourceName, "bootstrap_brokers_sasl_scram", dataSourceName, "bootstrap_brokers_sasl_scram"), resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_tls", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), // Hostname ordering not guaranteed between resource and data source reads resource.TestCheckResourceAttrPair(resourceName, "cluster_name", dataSourceName, "cluster_name"), resource.TestCheckResourceAttrPair(resourceName, "kafka_version", dataSourceName, "kafka_version"), diff --git a/aws/resource_aws_msk_cluster.go b/aws/resource_aws_msk_cluster.go index 1cfa8e4c982..e7805a4e8fe 100644 --- a/aws/resource_aws_msk_cluster.go +++ b/aws/resource_aws_msk_cluster.go @@ -38,11 +38,11 @@ func resourceAwsMskCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "bootstrap_brokers_tls": { + "bootstrap_brokers_sasl_scram": { Type: schema.TypeString, Computed: true, }, - "bootstrap_brokers_sasl_scram": { + "bootstrap_brokers_tls": { Type: schema.TypeString, Computed: true, }, @@ -98,40 +98,40 @@ func resourceAwsMskCluster() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "tls": { + "sasl": { Type: schema.TypeList, Optional: true, 
ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "certificate_authority_arns": { - Type: schema.TypeSet, + "scram": { + Type: schema.TypeBool, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validateArn, - }, }, }, }, + ConflictsWith: []string{"client_authentication.0.tls"}, }, - "sasl": { + "tls": { Type: schema.TypeList, Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "scram": { - Type: schema.TypeBool, + "certificate_authority_arns": { + Type: schema.TypeSet, Optional: true, ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validateArn, + }, }, }, }, - ConflictsWith: []string{"client_authentication.0.tls"}, }, }, }, @@ -464,8 +464,8 @@ func resourceAwsMskClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("arn", aws.StringValue(cluster.ClusterArn)) d.Set("bootstrap_brokers", aws.StringValue(brokerOut.BootstrapBrokerString)) - d.Set("bootstrap_brokers_tls", aws.StringValue(brokerOut.BootstrapBrokerStringTls)) d.Set("bootstrap_brokers_sasl_scram", aws.StringValue(brokerOut.BootstrapBrokerStringSaslScram)) + d.Set("bootstrap_brokers_tls", aws.StringValue(brokerOut.BootstrapBrokerStringTls)) if err := d.Set("broker_node_group_info", flattenMskBrokerNodeGroupInfo(cluster.BrokerNodeGroupInfo)); err != nil { return fmt.Errorf("error setting broker_node_group_info: %s", err) @@ -684,8 +684,8 @@ func expandMskClusterClientAuthentication(l []interface{}) *kafka.ClientAuthenti m := l[0].(map[string]interface{}) ca := &kafka.ClientAuthentication{ - Tls: expandMskClusterTls(m["tls"].([]interface{})), Sasl: expandMskClusterScram(m["sasl"].([]interface{})), + Tls: expandMskClusterTls(m["tls"].([]interface{})), } return ca @@ -741,20 +741,6 @@ func expandMskClusterEncryptionInTransit(l []interface{}) *kafka.EncryptionInTra return eit } -func expandMskClusterTls(l []interface{}) 
*kafka.Tls { - if len(l) == 0 || l[0] == nil { - return nil - } - - m := l[0].(map[string]interface{}) - - tls := &kafka.Tls{ - CertificateAuthorityArnList: expandStringSet(m["certificate_authority_arns"].(*schema.Set)), - } - - return tls -} - func expandMskClusterScram(l []interface{}) *kafka.Sasl { if len(l) == 0 || l[0] == nil { return nil @@ -774,6 +760,20 @@ func expandMskClusterScram(l []interface{}) *kafka.Sasl { return sasl } +func expandMskClusterTls(l []interface{}) *kafka.Tls { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + tls := &kafka.Tls{ + CertificateAuthorityArnList: expandStringSet(m["certificate_authority_arns"].(*schema.Set)), + } + + return tls +} + func expandMskOpenMonitoring(l []interface{}) *kafka.OpenMonitoringInfo { if len(l) == 0 || l[0] == nil { return nil @@ -933,8 +933,8 @@ func flattenMskClientAuthentication(ca *kafka.ClientAuthentication) []map[string } m := map[string]interface{}{ - "tls": flattenMskTls(ca.Tls), "sasl": flattenMskSasl(ca.Sasl), + "tls": flattenMskTls(ca.Tls), } return []map[string]interface{}{m} @@ -979,25 +979,33 @@ func flattenMskEncryptionInTransit(eit *kafka.EncryptionInTransit) []map[string] return []map[string]interface{}{m} } -func flattenMskTls(tls *kafka.Tls) []map[string]interface{} { - if tls == nil { +func flattenMskSasl(sasl *kafka.Sasl) []map[string]interface{} { + if sasl == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "certificate_authority_arns": aws.StringValueSlice(tls.CertificateAuthorityArnList), + "scram": flattenMskScram(sasl.Scram), } return []map[string]interface{}{m} } -func flattenMskSasl(sasl *kafka.Sasl) []map[string]interface{} { - if sasl == nil { +func flattenMskScram(scram *kafka.Scram) bool { + if scram == nil { + return false + } + + return aws.BoolValue(scram.Enabled) +} + +func flattenMskTls(tls *kafka.Tls) []map[string]interface{} { + if tls == nil { return []map[string]interface{}{} } m := 
map[string]interface{}{ - "scram": aws.Bool(true), + "certificate_authority_arns": aws.StringValueSlice(tls.CertificateAuthorityArnList), } return []map[string]interface{}{m} diff --git a/aws/resource_aws_msk_cluster_test.go b/aws/resource_aws_msk_cluster_test.go index 7c4db39132d..b8fff04b9ad 100644 --- a/aws/resource_aws_msk_cluster_test.go +++ b/aws/resource_aws_msk_cluster_test.go @@ -71,6 +71,7 @@ func TestAccAWSMskCluster_basic(t *testing.T) { testAccCheckMskClusterExists(resourceName, &cluster), testAccMatchResourceAttrRegionalARN(resourceName, "arn", "kafka", regexp.MustCompile(`cluster/.+`)), resource.TestCheckResourceAttr(resourceName, "bootstrap_brokers", ""), + resource.TestCheckResourceAttr(resourceName, "bootstrap_brokers_sasl_scram", ""), resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_tls", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.az_distribution", kafka.BrokerAZDistributionDefault), @@ -151,10 +152,8 @@ func TestAccAWSMskCluster_BrokerNodeGroupInfo_EbsVolumeSize(t *testing.T) { }) } -func TestAccAWSMskCluster_ClientAuthentication_Tls_CertificateAuthorityArns(t *testing.T) { - TestAccSkip(t, "Requires the aws_acmpca_certificate_authority resource to support importing the root CA certificate") - - var cluster1 kafka.ClusterInfo +func TestAccAWSMskCluster_ClientAuthentication_Sasl_Scram(t *testing.T) { + var cluster1, cluster2 kafka.ClusterInfo rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_msk_cluster.test" @@ -164,12 +163,33 @@ func TestAccAWSMskCluster_ClientAuthentication_Tls_CertificateAuthorityArns(t *t CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfigClientAuthenticationTlsCertificateAuthorityArns(rName), + Config: 
testAccMskClusterConfigClientAuthenticationSaslScram(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster1), + resource.TestMatchResourceAttr(resourceName, "bootstrap_brokers_sasl_scram", regexp.MustCompile(`^(([-\w]+\.){1,}[\w]+:\d+,){2,}([-\w]+\.){1,}[\w]+:\d+$`)), resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "1"), - resource.TestCheckResourceAttr(resourceName, "client_authentication.0.tls.#", "1"), - resource.TestCheckResourceAttr(resourceName, "configuration_info.0.tls.0.certificate_authority_arns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.0.scram", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "bootstrap_brokers", // API may mutate ordering and selection of brokers to return + "bootstrap_brokers_tls", // API may mutate ordering and selection of brokers to return + }, + }, + { + Config: testAccMskClusterConfigClientAuthenticationSaslScram(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckMskClusterExists(resourceName, &cluster2), + testAccCheckMskClusterRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "bootstrap_brokers_sasl_scram", ""), + resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.#", "1"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.0.scram", "false"), ), }, { @@ -185,7 +205,9 @@ func TestAccAWSMskCluster_ClientAuthentication_Tls_CertificateAuthorityArns(t *t }) } -func TestAccAWSMskCluster_ClientAuthentication_Sasl_Scram(t *testing.T) { +func TestAccAWSMskCluster_ClientAuthentication_Tls_CertificateAuthorityArns(t *testing.T) { + TestAccSkip(t, "Requires the 
aws_acmpca_certificate_authority resource to support importing the root CA certificate") + var cluster1 kafka.ClusterInfo rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_msk_cluster.test" @@ -196,12 +218,12 @@ func TestAccAWSMskCluster_ClientAuthentication_Sasl_Scram(t *testing.T) { CheckDestroy: testAccCheckMskClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccMskClusterConfigClientAuthenticationSaslScram(rName), + Config: testAccMskClusterConfigClientAuthenticationTlsCertificateAuthorityArns(rName), Check: resource.ComposeTestCheckFunc( testAccCheckMskClusterExists(resourceName, &cluster1), resource.TestCheckResourceAttr(resourceName, "client_authentication.#", "1"), - resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.#", "1"), - resource.TestCheckResourceAttr(resourceName, "client_authentication.0.sasl.0.scram", "true"), + resource.TestCheckResourceAttr(resourceName, "client_authentication.0.tls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "configuration_info.0.tls.0.certificate_authority_arns.#", "1"), ), }, { @@ -956,32 +978,11 @@ resource "aws_msk_cluster" "test" { `, rName) } -func testAccMskClusterConfigClientAuthenticationSaslScram(rName string) string { +func testAccMskClusterConfigClientAuthenticationSaslScram(rName string, enabled bool) string { return testAccMskClusterBaseConfig() + fmt.Sprintf(` -resource "aws_kms_key" "test" { - description = %[1]q -} - -resource "aws_secretsmanager_secret" "test" { - name = "AmazonMSK_%[1]s" - kms_key_id = aws_kms_key.test.key_id -} - -resource "aws_secretsmanager_secret_version" "test" { - secret_id = aws_secretsmanager_secret.test.id - secret_string = jsonencode({ username = "user", password = "pass" }) -} - -resource "aws_msk_scram_secret_association" "test" { - cluster_arn = aws_msk_cluster.test.arn - secret_arn_list = [aws_secretsmanager_secret.test.arn] - - depends_on = [aws_secretsmanager_secret_version.test] -} - resource 
"aws_msk_cluster" "test" { cluster_name = %[1]q - kafka_version = "2.2.1" + kafka_version = "2.6.0" number_of_broker_nodes = 3 broker_node_group_info { @@ -993,17 +994,11 @@ resource "aws_msk_cluster" "test" { client_authentication { sasl { - scram = true - } - } - - encryption_info { - encryption_in_transit { - client_broker = "TLS" + scram = %t } } } -`, rName) +`, rName, enabled) } func testAccMskClusterConfigConfigurationInfoRevision1(rName string) string { diff --git a/aws/resource_aws_msk_scram_secret_association.go b/aws/resource_aws_msk_scram_secret_association.go index 1e60ff4defb..20f1616a6d7 100644 --- a/aws/resource_aws_msk_scram_secret_association.go +++ b/aws/resource_aws_msk_scram_secret_association.go @@ -12,7 +12,11 @@ import ( "github.com/terraform-providers/terraform-provider-aws/aws/internal/service/msk/finder" ) -const ScramSecretBatchSize = 10 +const ( + AssociatingSecret = "associating" + DisassociatingSecret = "disassociating" + ScramSecretBatchSize = 10 +) func resourceAwsMskScramSecretAssociation() *schema.Resource { return &schema.Resource{ @@ -56,7 +60,7 @@ func resourceAwsMskScramSecretAssociationCreate(d *schema.ResourceData, meta int d.SetId(aws.StringValue(output.ClusterArn)) if len(output.UnprocessedScramSecrets) != 0 { - return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + return unprocessedScramSecretsError(output.ClusterArn, output.UnprocessedScramSecrets, AssociatingSecret) } return resourceAwsMskScramSecretAssociationRead(d, meta) @@ -98,7 +102,7 @@ func resourceAwsMskScramSecretAssociationUpdate(d *schema.ResourceData, meta int } if len(output.UnprocessedScramSecrets) != 0 { - return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + return unprocessedScramSecretsError(output.ClusterArn, output.UnprocessedScramSecrets, AssociatingSecret) } } } @@ -111,7 +115,7 @@ func resourceAwsMskScramSecretAssociationUpdate(d *schema.ResourceData, meta int } if 
len(output.UnprocessedScramSecrets) != 0 { - return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + return unprocessedScramSecretsError(output.ClusterArn, output.UnprocessedScramSecrets, DisassociatingSecret) } } } @@ -140,7 +144,7 @@ func resourceAwsMskScramSecretAssociationDelete(d *schema.ResourceData, meta int return fmt.Errorf("error disassociating scram secret(s) from MSK cluster (%s): %w", d.Id(), err) } if len(output.UnprocessedScramSecrets) != 0 { - return flattenUnprocessedScramSecrets(output.ClusterArn, output.UnprocessedScramSecrets) + return unprocessedScramSecretsError(output.ClusterArn, output.UnprocessedScramSecrets, DisassociatingSecret) } } @@ -193,11 +197,13 @@ func disassociateMSKClusterSecrets(conn *kafka.Kafka, clusterArn string, secretA return output, nil } -func flattenUnprocessedScramSecrets(clusterArn *string, secrets []*kafka.UnprocessedScramSecret) error { +func unprocessedScramSecretsError(clusterArn *string, secrets []*kafka.UnprocessedScramSecret, action string) error { var errors *multierror.Error + for _, s := range secrets { secretArn, errMsg := aws.StringValue(s.SecretArn), aws.StringValue(s.ErrorMessage) - errors = multierror.Append(errors, fmt.Errorf("error associating MSK cluster (%s) with scram secret (%s): %s", aws.StringValue(clusterArn), secretArn, errMsg)) + errors = multierror.Append(errors, fmt.Errorf("error %s MSK cluster (%s) with scram secret (%s): %s", action, aws.StringValue(clusterArn), secretArn, errMsg)) } + return errors.ErrorOrNil() } diff --git a/aws/resource_aws_msk_scram_secret_association_test.go b/aws/resource_aws_msk_scram_secret_association_test.go index 23e36cb5f31..cf0462e000d 100644 --- a/aws/resource_aws_msk_scram_secret_association_test.go +++ b/aws/resource_aws_msk_scram_secret_association_test.go @@ -176,6 +176,8 @@ func testAccCheckMskScramSecretAssociationExists(resourceName string) resource.T func testAccMskScramSecretAssociationBaseConfig(rName string, count 
int) string { return fmt.Sprintf(` +data "aws_partition" "current" {} + resource "aws_msk_cluster" "test" { cluster_name = %[1]q kafka_version = "2.5.1" @@ -222,7 +224,7 @@ resource "aws_secretsmanager_secret_policy" "test" { "Sid": "AWSKafkaResourcePolicy", "Effect" : "Allow", "Principal" : { - "Service" : "kafka.amazonaws.com" + "Service" : "kafka.${data.aws_partition.current.dns_suffix}" }, "Action" : "secretsmanager:getSecretValue", "Resource" : "${aws_secretsmanager_secret.test[count.index].arn}" diff --git a/website/docs/d/msk_cluster.html.markdown b/website/docs/d/msk_cluster.html.markdown index 9c6d215c3cc..86163bd6eb5 100644 --- a/website/docs/d/msk_cluster.html.markdown +++ b/website/docs/d/msk_cluster.html.markdown @@ -30,8 +30,8 @@ In addition to all arguments above, the following attributes are exported: * `arn` - Amazon Resource Name (ARN) of the MSK cluster. * `bootstrap_brokers` - A comma separated list of one or more hostname:port pairs of Kafka brokers suitable to boostrap connectivity to the Kafka cluster. -* `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. * `bootstrap_brokers_sasl_scram` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity using SASL/SCRAM to the kafka cluster. +* `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. * `kafka_version` - Apache Kafka version. * `number_of_broker_nodes` - Number of broker nodes in the cluster. * `tags` - Map of key-value pairs assigned to the cluster. 
diff --git a/website/docs/r/msk_cluster.html.markdown b/website/docs/r/msk_cluster.html.markdown index 32acca45322..77b6245b2ad 100644 --- a/website/docs/r/msk_cluster.html.markdown +++ b/website/docs/r/msk_cluster.html.markdown @@ -186,12 +186,12 @@ The following arguments are supported: ### client_authentication Argument Reference -* `sasl` - (Optional) Configuration block for specifying Sasl client authentication. See below. +* `sasl` - (Optional) Configuration block for specifying SASL client authentication. See below. * `tls` - (Optional) Configuration block for specifying TLS client authentication. See below. #### client_authentication sasl Argument Reference -* `scram` - (Optional) Enables scram client authentication via AWS Secrets Manager. Defaults to `false`. +* `scram` - (Optional) Enables SCRAM client authentication via AWS Secrets Manager. Defaults to `false`. #### client_authentication tls Argument Reference @@ -255,8 +255,8 @@ In addition to all arguments above, the following attributes are exported: * `arn` - Amazon Resource Name (ARN) of the MSK cluster. * `bootstrap_brokers` - A comma separated list of one or more hostname:port pairs of kafka brokers suitable to boostrap connectivity to the kafka cluster. Only contains value if `client_broker` encryption in transit is set to `PLAINTEXT` or `TLS_PLAINTEXT`. -* `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. Only contains value if `client_broker` encryption in transit is set to `TLS_PLAINTEXT` or `TLS`. * `bootstrap_brokers_sasl_scram` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity using SASL/SCRAM to the kafka cluster. Only contains value if `client_broker` encryption in transit is set to `TLS_PLAINTEXT` or `TLS` and `client_authentication` is set to `sasl`. 
+* `bootstrap_brokers_tls` - A comma separated list of one or more DNS names (or IPs) and TLS port pairs kafka brokers suitable to boostrap connectivity to the kafka cluster. Only contains value if `client_broker` encryption in transit is set to `TLS_PLAINTEXT` or `TLS`. * `current_version` - Current version of the MSK Cluster used for updates, e.g. `K13V1IB3VIYZZH` * `encryption_info.0.encryption_at_rest_kms_key_arn` - The ARN of the KMS key used for encryption at rest of the broker data volumes. * `zookeeper_connect_string` - A comma separated list of one or more hostname:port pairs to use to connect to the Apache Zookeeper cluster. diff --git a/website/docs/r/msk_scram_secret_association.html.markdown b/website/docs/r/msk_scram_secret_association.html.markdown index a2799903930..017fc5b6be9 100644 --- a/website/docs/r/msk_scram_secret_association.html.markdown +++ b/website/docs/r/msk_scram_secret_association.html.markdown @@ -3,12 +3,22 @@ subcategory: "Managed Streaming for Kafka (MSK)" layout: "aws" page_title: "AWS: aws_msk_scram_secret_association" description: |- - Provides resource for managing an AWS Managed Streaming for Kafka (MSK) Scram secret association + Associates SCRAM secrets with a Managed Streaming for Kafka (MSK) cluster. --- # Resource: aws_msk_scram_secret_association -Manages AWS Managed Streaming for Kafka Scram secrets. It is important to note that a policy is required for the secret resource in order for Kafka to be able to read it. This policy is attached automatically when the `aws_msk_scram_secret_association` is used, however, this policy will not be in terraform and as such, will present a diff on plan/apply. For that reason, you must use the secrets managed policy as shown below in order to ensure that the state is in a clean state after the creation of secret and the association to the cluster. +Associates SCRAM secrets stored in the Secrets Manager service with a Managed Streaming for Kafka (MSK) cluster. 
+ +-> **Note:** The following assumes the MSK cluster has SASL/SCRAM authentication enabled. See below for example usage or refer to the [Username/Password Authentication](https://docs.aws.amazon.com/msk/latest/developerguide/msk-password.html) section of the MSK Developer Guide for more details. + +To set up username and password authentication for a cluster, create an [`aws_secretsmanager_secret` resource](/docs/providers/aws/r/secretsmanager_secret.html) and associate +a username and password with the secret with an [`aws_secretsmanager_secret_version` resource](/docs/providers/aws/r/secretsmanager_secret_version.html). When creating a secret for the cluster, +the `name` must have the prefix `AmazonMSK_` and you must either use an existing custom AWS KMS key or create a new +custom AWS KMS key for your secret with the [`aws_kms_key` resource](/docs/providers/aws/r/kms_key.html). It is important to note that a policy is required for the `aws_secretsmanager_secret` +resource in order for Kafka to be able to read it. This policy is attached automatically when the `aws_msk_scram_secret_association` is used, +however, this policy will not be in terraform and as such, will present a diff on plan/apply. For that reason, you must use the [`aws_secretsmanager_secret_policy` +resource](/docs/providers/aws/r/secretsmanager_secret_policy.html) as shown below in order to ensure that the state is in a clean state after the creation of secret and the association to the cluster. ## Example Usage @@ -16,11 +26,12 @@ Manages AWS Managed Streaming for Kafka Scram secrets. It is important to note resource "aws_msk_scram_secret_association" "example" { cluster_arn = aws_msk_cluster.example.arn secret_arn_list = [aws_secretsmanager_secret.example.arn] + + depends_on = [aws_secretsmanager_secret_version.example] } resource "aws_msk_cluster" "example" { - cluster_name = "example" - kafka_version = "2.4.1" + cluster_name = "example" # ... other configuration... 
client_authentication { sasl { @@ -30,11 +41,21 @@ resource "aws_msk_cluster" "example" { } resource "aws_secretsmanager_secret" "example" { - name = "AmazonMSK_example" + name = "AmazonMSK_example" + kms_key_id = aws_kms_key.example.key_id +} + +resource "aws_kms_key" "example" { + description = "Example Key for MSK Cluster Scram Secret Association" +} + +resource "aws_secretsmanager_secret_version" "example" { + secret_id = aws_secretsmanager_secret.example.id + secret_string = jsonencode({ username = "user", password = "pass" }) } -resource "aws_secretsmanager_secret_policy" "msk" { - secret_arn = aws_secretsmanager_secret.msk.arn +resource "aws_secretsmanager_secret_policy" "example" { + secret_arn = aws_secretsmanager_secret.example.arn policy = <