diff --git a/go.mod b/go.mod index 1a05d1a50cc..ab5c586989f 100644 --- a/go.mod +++ b/go.mod @@ -75,6 +75,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3 github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3 github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3 + github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 diff --git a/go.sum b/go.sum index c1d6d6acb1e..ec6e76e2a1c 100644 --- a/go.sum +++ b/go.sum @@ -170,6 +170,8 @@ github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3 h1:+AiQwAYmhOXn0 github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3/go.mod h1:Om/t/NhLjZu7rYMYBI1rWyGqEUfqSn/vk/k1/7pLEC8= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3 h1:Aq+7pnVWk59dS2BMVSOEDWN0yProaw0XhaUsRGbH7MM= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3/go.mod h1:4duVgMu+RBKpiU+Hz4FjPedMLWNFVL4lhauBVYz8OZ4= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 h1:HzdVJzMjEhQhLjUB1xGRMhs4zjaemPLUbdhhA4wfnMI= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3/go.mod h1:2SvlhcMgqPNNVr53/0m91cxPTY6mUFvp6o+Kzi63zUM= github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 h1:tFFs24+oIWlHLbTyluhnQIHaj8o4nc8yXHNnAc8PTN8= github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3/go.mod h1:WP7xXB608MyVv3yFzduKlLeYmU0AxMo7zeF9Cuwbvwc= github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 h1:ZrKMl8jsL5YHurOLf0YVLb7JBYxGtqQQAknJ5g4MTz4= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 0f9837eb7eb..3299bc3c6b4 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -67,6 +67,7 @@ import ( costexplorer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costexplorer" costoptimizationhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costoptimizationhub" customerprofiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/customerprofiles" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" databrew_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databrew" datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync" datazone_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datazone" @@ -204,7 +205,6 @@ import ( batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" connect_sdkv1 "github.com/aws/aws-sdk-go/service/connect" - databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" dataexchange_sdkv1 "github.com/aws/aws-sdk-go/service/dataexchange" datapipeline_sdkv1 "github.com/aws/aws-sdk-go/service/datapipeline" detective_sdkv1 "github.com/aws/aws-sdk-go/service/detective" @@ -524,8 +524,8 @@ func (c *AWSClient) DLMClient(ctx context.Context) *dlm_sdkv2.Client { return errs.Must(client[*dlm_sdkv2.Client](ctx, c, names.DLM, make(map[string]any))) } -func (c *AWSClient) DMSConn(ctx context.Context) *databasemigrationservice_sdkv1.DatabaseMigrationService { - return errs.Must(conn[*databasemigrationservice_sdkv1.DatabaseMigrationService](ctx, c, names.DMS, make(map[string]any))) +func (c *AWSClient) DMSClient(ctx context.Context) *databasemigrationservice_sdkv2.Client { + return errs.Must(client[*databasemigrationservice_sdkv2.Client](ctx, c, names.DMS, make(map[string]any))) } func (c *AWSClient) DRSClient(ctx context.Context) *drs_sdkv2.Client { diff 
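For context, a minimal standalone sketch (not provider code) of what the new DMSClient accessor hands back: an AWS SDK for Go v2 client built from an aws.Config, whose operations take a context directly instead of via *WithContext methods. The package paths are the ones added to go.mod above; config.LoadDefaultConfig is ordinary SDK v2 usage, not part of this change.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
)

func main() {
	ctx := context.Background()

	// Shared AWS configuration (region, credentials); only needed outside the provider.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// v2 clients are constructed from an aws.Config; inside the provider the
	// equivalent object now comes from meta.(*conns.AWSClient).DMSClient(ctx).
	client := dms.NewFromConfig(cfg)
	_ = client
}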
--git a/internal/service/dms/certificate.go b/internal/service/dms/certificate.go index 8834c89d50e..616bd7b5d69 100644 --- a/internal/service/dms/certificate.go +++ b/internal/service/dms/certificate.go @@ -8,15 +8,17 @@ import ( "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" @@ -26,7 +28,7 @@ import ( // @SDKResource("aws_dms_certificate", name="Certificate") // @Tags(identifierAttribute="certificate_arn") -func ResourceCertificate() *schema.Resource { +func resourceCertificate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCertificateCreate, ReadWithoutTimeout: resourceCertificateRead, @@ -77,7 +79,7 @@ func ResourceCertificate() *schema.Resource { func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) certificateID := d.Get("certificate_id").(string) input := &dms.ImportCertificateInput{ @@ -97,7 +99,7 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta input.CertificateWallet = v } - _, err := conn.ImportCertificateWithContext(ctx, input) + _, err := conn.ImportCertificate(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Certificate (%s): %s", certificateID, err) @@ -110,9 +112,9 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - certificate, err := FindCertificateByID(ctx, conn, d.Id()) + certificate, err := findCertificateByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Certificate (%s) not found, removing from state", d.Id()) @@ -124,7 +126,15 @@ func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DMS Certificate (%s): %s", d.Id(), err) } - resourceCertificateSetState(d, certificate) + d.SetId(aws.ToString(certificate.CertificateIdentifier)) + d.Set("certificate_id", certificate.CertificateIdentifier) + d.Set(names.AttrCertificateARN, certificate.CertificateArn) + if v := aws.ToString(certificate.CertificatePem); v != "" { + d.Set("certificate_pem", v) + } + if certificate.CertificateWallet != nil && 
len(certificate.CertificateWallet) != 0 { + d.Set("certificate_wallet", itypes.Base64EncodeOnce(certificate.CertificateWallet)) + } return diags } @@ -139,14 +149,14 @@ func resourceCertificateUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Certificate: %s", d.Id()) - _, err := conn.DeleteCertificateWithContext(ctx, &dms.DeleteCertificateInput{ + _, err := conn.DeleteCertificate(ctx, &dms.DeleteCertificateInput{ CertificateArn: aws.String(d.Get(names.AttrCertificateARN).(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -157,26 +167,12 @@ func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func resourceCertificateSetState(d *schema.ResourceData, cert *dms.Certificate) { - d.SetId(aws.StringValue(cert.CertificateIdentifier)) - - d.Set("certificate_id", cert.CertificateIdentifier) - d.Set(names.AttrCertificateARN, cert.CertificateArn) - - if aws.StringValue(cert.CertificatePem) != "" { - d.Set("certificate_pem", cert.CertificatePem) - } - if cert.CertificateWallet != nil && len(cert.CertificateWallet) != 0 { - d.Set("certificate_wallet", itypes.Base64EncodeOnce(cert.CertificateWallet)) - } -} - -func FindCertificateByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.Certificate, error) { +func findCertificateByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.Certificate, error) { input := &dms.DescribeCertificatesInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("certificate-id"), - Values: []*string{aws.String(id)}, + Values: []string{id}, }, }, } @@ -184,7 +180,7 @@ func FindCertificateByID(ctx context.Context, conn *dms.DatabaseMigrationService return findCertificate(ctx, conn, input) } -func findCertificate(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeCertificatesInput) (*dms.Certificate, error) { +func findCertificate(ctx context.Context, conn *dms.Client, input *dms.DescribeCertificatesInput) (*awstypes.Certificate, error) { output, err := findCertificates(ctx, conn, input) if err != nil { @@ -194,33 +190,27 @@ func findCertificate(ctx context.Context, conn *dms.DatabaseMigrationService, in return tfresource.AssertSinglePtrResult(output) } -func findCertificates(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeCertificatesInput) ([]*dms.Certificate, error) { - var output []*dms.Certificate +func findCertificates(ctx context.Context, conn *dms.Client, input *dms.DescribeCertificatesInput) ([]*awstypes.Certificate, error) { + var output []awstypes.Certificate - err := conn.DescribeCertificatesPagesWithContext(ctx, input, func(page *dms.DescribeCertificatesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeCertificatesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Certificates { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, 
dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Certificates...) } - return output, nil + return tfslices.ToPointers(output), nil } diff --git a/internal/service/dms/certificate_data_source.go b/internal/service/dms/certificate_data_source.go index 470711f8a1a..02bdfbb0b70 100644 --- a/internal/service/dms/certificate_data_source.go +++ b/internal/service/dms/certificate_data_source.go @@ -7,7 +7,7 @@ import ( "context" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -18,8 +18,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_certificate") -func DataSourceCertificate() *schema.Resource { +// @SDKDataSource("aws_dms_certificate", name="Certificate") +func dataSourceCertificate() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCertificateRead, @@ -79,19 +79,19 @@ func DataSourceCertificate() *schema.Resource { func dataSourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig certificateID := d.Get("certificate_id").(string) - out, err := FindCertificateByID(ctx, conn, certificateID) + out, err := findCertificateByID(ctx, conn, certificateID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Certificate (%s): %s", certificateID, err) } - d.SetId(aws.StringValue(out.CertificateIdentifier)) - arn := aws.StringValue(out.CertificateArn) + d.SetId(aws.ToString(out.CertificateIdentifier)) + arn := aws.ToString(out.CertificateArn) d.Set(names.AttrCertificateARN, arn) d.Set("certificate_id", out.CertificateIdentifier) d.Set("certificate_pem", out.CertificatePem) @@ -104,6 +104,7 @@ func dataSourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta d.Set("valid_to_date", out.ValidToDate.String()) tags, err := listTags(ctx, conn, arn) + if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for DMS Certificate (%s): %s", arn, err) } diff --git a/internal/service/dms/certificate_test.go b/internal/service/dms/certificate_test.go index 8690c4d32df..85edc80c042 100644 --- a/internal/service/dms/certificate_test.go +++ b/internal/service/dms/certificate_test.go @@ -147,7 +147,7 @@ func testAccCheckCertificateDestroy(ctx context.Context) resource.TestCheckFunc continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindCertificateByID(ctx, conn, rs.Primary.ID) @@ -173,7 +173,7 @@ func testAccCertificateExists(ctx context.Context, n string) resource.TestCheckF return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindCertificateByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index e62faafc573..413b1a72c28 
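The rewritten findCertificates above swaps DescribeCertificatesPagesWithContext for the SDK v2 paginator plus a typed not-found error. A hedged standalone sketch of that pattern follows; errors.As stands in for the provider's errs.IsA helper, and the function name describeAllCertificates is illustrative only.

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

func describeAllCertificates(ctx context.Context, client *dms.Client, id string) ([]awstypes.Certificate, error) {
	input := &dms.DescribeCertificatesInput{
		Filters: []awstypes.Filter{{
			Name:   aws.String("certificate-id"),
			Values: []string{id}, // v2 filters take []string, not []*string
		}},
	}

	var certs []awstypes.Certificate

	pages := dms.NewDescribeCertificatesPaginator(client, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)

		// Modeled v2 error types replace v1 error-code string comparisons.
		var nf *awstypes.ResourceNotFoundFault
		if errors.As(err, &nf) {
			return nil, fmt.Errorf("DMS certificate %q not found: %w", id, err)
		}
		if err != nil {
			return nil, err
		}

		certs = append(certs, page.Certificates...)
	}

	return certs, nil
}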
100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -11,9 +11,9 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -21,6 +21,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfkms "github.com/hashicorp/terraform-provider-aws/internal/service/kms" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -31,7 +33,7 @@ import ( // @SDKResource("aws_dms_endpoint", name="Endpoint") // @Tags(identifierAttribute="endpoint_arn") -func ResourceEndpoint() *schema.Resource { +func resourceEndpoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceEndpointCreate, ReadWithoutTimeout: resourceEndpointRead, @@ -110,9 +112,9 @@ func ResourceEndpoint() *schema.Resource { ValidateFunc: validEndpointID, }, names.AttrEndpointType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.ReplicationEndpointTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ReplicationEndpointTypeValue](), }, "engine_name": { Type: schema.TypeString, @@ -163,10 +165,10 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "message_format": { - Type: schema.TypeString, - Optional: true, - Default: dms.MessageFormatValueJson, - ValidateFunc: validation.StringInSlice(dms.MessageFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.MessageFormatValueJson, + ValidateDiagFunc: enum.Validate[awstypes.MessageFormatValue](), }, "message_max_bytes": { Type: schema.TypeInt, @@ -192,9 +194,9 @@ func ResourceEndpoint() *schema.Resource { Optional: true, }, "security_protocol": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.KafkaSecurityProtocol_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.KafkaSecurityProtocol](), }, "ssl_ca_certificate_arn": { Type: schema.TypeString, @@ -257,11 +259,11 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "message_format": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: dms.MessageFormatValueJson, - ValidateFunc: validation.StringInSlice(dms.MessageFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.MessageFormatValueJson, + ValidateDiagFunc: enum.Validate[awstypes.MessageFormatValue](), }, "partition_include_schema_table": { Type: schema.TypeBool, @@ -307,10 +309,10 @@ func ResourceEndpoint() *schema.Resource { Default: mongoDBAuthSourceAdmin, }, "auth_type": { - Type: schema.TypeString, - Optional: true, - Default: 
dms.AuthTypeValuePassword, - ValidateFunc: validation.StringInSlice(dms.AuthTypeValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AuthTypeValuePassword, + ValidateDiagFunc: enum.Validate[awstypes.AuthTypeValue](), }, "docs_to_investigate": { Type: schema.TypeString, @@ -323,10 +325,10 @@ func ResourceEndpoint() *schema.Resource { Default: "false", }, "nesting_level": { - Type: schema.TypeString, - Optional: true, - Default: dms.NestingLevelValueNone, - ValidateFunc: validation.StringInSlice(dms.NestingLevelValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.NestingLevelValueNone, + ValidateDiagFunc: enum.Validate[awstypes.NestingLevelValue](), }, }, }, @@ -433,9 +435,9 @@ func ResourceEndpoint() *schema.Resource { Sensitive: true, }, "auth_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.RedisAuthTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RedisAuthTypeValue](), }, "auth_user_name": { Type: schema.TypeString, @@ -455,10 +457,10 @@ func ResourceEndpoint() *schema.Resource { Optional: true, }, "ssl_security_protocol": { - Type: schema.TypeString, - Optional: true, - Default: dms.SslSecurityProtocolValueSslEncryption, - ValidateFunc: validation.StringInSlice(dms.SslSecurityProtocolValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.SslSecurityProtocolValueSslEncryption, + ValidateDiagFunc: enum.Validate[awstypes.SslSecurityProtocolValue](), }, }, }, @@ -523,10 +525,10 @@ func ResourceEndpoint() *schema.Resource { Default: "", }, "canned_acl_for_objects": { - Type: schema.TypeString, - Optional: true, - Default: dms.CannedAclForObjectsValueNone, - ValidateFunc: validation.StringInSlice(dms.CannedAclForObjectsValue_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.CannedAclForObjectsValueNone, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.CannedAclForObjectsValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -585,10 +587,10 @@ func ResourceEndpoint() *schema.Resource { Default: "\\n", }, "data_format": { - Type: schema.TypeString, - Optional: true, - Default: dms.DataFormatValueCsv, - ValidateFunc: validation.StringInSlice(dms.DataFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DataFormatValueCsv, + ValidateDiagFunc: enum.Validate[awstypes.DataFormatValue](), }, "data_page_size": { Type: schema.TypeInt, @@ -597,10 +599,10 @@ func ResourceEndpoint() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "date_partition_delimiter": { - Type: schema.TypeString, - Optional: true, - Default: dms.DatePartitionDelimiterValueSlash, - ValidateFunc: validation.StringInSlice(dms.DatePartitionDelimiterValue_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DatePartitionDelimiterValueSlash, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionDelimiterValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -611,10 +613,10 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "date_partition_sequence": { - Type: schema.TypeString, - Optional: true, - Default: dms.DatePartitionSequenceValueYyyymmdd, - ValidateFunc: validation.StringInSlice(dms.DatePartitionSequenceValue_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: 
awstypes.DatePartitionSequenceValueYyyymmdd, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionSequenceValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -631,10 +633,10 @@ func ResourceEndpoint() *schema.Resource { Default: true, }, "encoding_type": { - Type: schema.TypeString, - Optional: true, - Default: dms.EncodingTypeValueRleDictionary, - ValidateFunc: validation.StringInSlice(dms.EncodingTypeValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.EncodingTypeValueRleDictionary, + ValidateDiagFunc: enum.Validate[awstypes.EncodingTypeValue](), }, "encryption_mode": { Type: schema.TypeString, @@ -675,10 +677,10 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "parquet_version": { - Type: schema.TypeString, - Optional: true, - Default: dms.ParquetVersionValueParquet10, - ValidateFunc: validation.StringInSlice(dms.ParquetVersionValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ParquetVersionValueParquet10, + ValidateDiagFunc: enum.Validate[awstypes.ParquetVersionValue](), }, "preserve_transactions": { Type: schema.TypeBool, @@ -750,10 +752,10 @@ func ResourceEndpoint() *schema.Resource { Optional: true, }, "ssl_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DmsSslModeValue_Values(), false), + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DmsSslModeValue](), }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -776,12 +778,12 @@ func ResourceEndpoint() *schema.Resource { func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) endpointID := d.Get("endpoint_id").(string) input := &dms.CreateEndpointInput{ EndpointIdentifier: aws.String(endpointID), - EndpointType: aws.String(d.Get(names.AttrEndpointType).(string)), + EndpointType: awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)), EngineName: aws.String(d.Get("engine_name").(string)), Tags: getTagsIn(ctx), } @@ -801,22 +803,22 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("ssl_mode"); ok { - input.SslMode = aws.String(v.(string)) + input.SslMode = awstypes.DmsSslModeValue(v.(string)) } switch d.Get("engine_name").(string) { case engineNameAurora, engineNameMariadb, engineNameMySQL: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -824,7 +826,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } case 
engineNameAuroraPostgresql, engineNamePostgres: - settings := &dms.PostgreSQLSettings{} + settings := &awstypes.PostgreSQLSettings{} if _, ok := d.GetOk("postgres_settings"); ok { settings = expandPostgreSQLSettings(d.Get("postgres_settings").([]interface{})[0].(map[string]interface{})) } @@ -837,7 +839,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.Username = aws.String(d.Get(names.AttrUsername).(string)) settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) - settings.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) // Set connection info in top-level namespace as well @@ -846,15 +848,15 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in input.PostgreSQLSettings = settings case engineNameDynamoDB: - input.DynamoDbSettings = &dms.DynamoDbSettings{ + input.DynamoDbSettings = &awstypes.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } case engineNameElasticsearch, engineNameOpenSearch: - input.ElasticsearchSettings = &dms.ElasticsearchSettings{ + input.ElasticsearchSettings = &awstypes.ElasticsearchSettings{ ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), - ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), - FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + ErrorRetryDuration: aws.Int32(int32(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), + FullLoadErrorPercentage: aws.Int32(int32(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } case engineNameKafka: @@ -862,7 +864,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in case engineNameKinesis: input.KinesisSettings = expandKinesisSettings(d.Get("kinesis_settings").([]interface{})[0].(map[string]interface{})) case engineNameMongodb: - var settings = &dms.MongoDbSettings{} + var settings = &awstypes.MongoDbSettings{} if _, ok := d.GetOk("secrets_manager_arn"); ok { settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) @@ -871,7 +873,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.Username = aws.String(d.Get(names.AttrUsername).(string)) settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) - settings.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, input) @@ -879,9 +881,9 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) settings.KmsKeyId = aws.String(d.Get(names.AttrKMSKeyARN).(string)) - settings.AuthType = aws.String(d.Get("mongodb_settings.0.auth_type").(string)) - settings.AuthMechanism = aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)) - 
settings.NestingLevel = aws.String(d.Get("mongodb_settings.0.nesting_level").(string)) + settings.AuthType = awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)) + settings.AuthMechanism = awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)) + settings.NestingLevel = awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)) settings.ExtractDocId = aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)) settings.DocsToInvestigate = aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)) settings.AuthSource = aws.String(d.Get("mongodb_settings.0.auth_source").(string)) @@ -889,17 +891,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in input.MongoDbSettings = settings case engineNameOracle: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -909,7 +911,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in case engineNameRedis: input.RedisSettings = expandRedisSettings(d.Get("redis_settings").([]interface{})[0].(map[string]interface{})) case engineNameRedshift: - var settings = &dms.RedshiftSettings{ + var settings = &awstypes.RedshiftSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -920,7 +922,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.Username = aws.String(d.Get(names.AttrUsername).(string)) settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) - settings.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, input) @@ -938,7 +940,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { - settings.EncryptionMode = aws.String(v) + settings.EncryptionMode = awstypes.EncryptionModeValue(v) } if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { @@ -953,17 +955,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in input.RedshiftSettings = settings case engineNameSQLServer, engineNameBabelfish: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } 
else { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -972,17 +974,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } case engineNameSybase: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -991,17 +993,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } case engineNameDB2, engineNameDB2zOS: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -1014,11 +1016,10 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), + _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.CreateEndpointWithContext(ctx, input) - }, - dms.ErrCodeAccessDeniedFault) + return conn.CreateEndpoint(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Endpoint (%s): %s", endpointID, err) @@ -1031,9 +1032,9 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - endpoint, err := FindEndpointByID(ctx, conn, d.Id()) + endpoint, err := findEndpointByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { 
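The create path above now retries on *awstypes.AccessDeniedFault via tfresource.RetryWhenIsA, and delete tolerates *awstypes.ResourceNotFoundFault via errs.IsA. As a rough sketch of what those generic checks reduce to in plain Go (assuming only the standard errors package and the v2 types package), the helper names below are illustrative only:

import (
	"errors"

	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

// isResourceNotFound reports whether err wraps the modeled DMS not-found fault,
// roughly what errs.IsA[*awstypes.ResourceNotFoundFault](err) checks.
func isResourceNotFound(err error) bool {
	var nf *awstypes.ResourceNotFoundFault
	return errors.As(err, &nf)
}

// isAccessDenied is the analogous check used to decide whether CreateEndpoint
// should be retried while IAM permissions propagate.
func isAccessDenied(err error) bool {
	var ad *awstypes.AccessDeniedFault
	return errors.As(err, &ad)
}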
log.Printf("[WARN] DMS Endpoint (%s) not found, removing from state", d.Id()) @@ -1054,12 +1055,12 @@ func resourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { endpointARN := d.Get("endpoint_arn").(string) pauseTasks := d.Get("pause_replication_tasks").(bool) - var tasks []*dms.ReplicationTask + var tasks []awstypes.ReplicationTask if pauseTasks { var err error @@ -1080,7 +1081,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if d.HasChange(names.AttrEndpointType) { - input.EndpointType = aws.String(d.Get(names.AttrEndpointType).(string)) + input.EndpointType = awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)) } if d.HasChange("engine_name") { @@ -1092,13 +1093,13 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if d.HasChange("service_access_role") { - input.DynamoDbSettings = &dms.DynamoDbSettings{ + input.DynamoDbSettings = &awstypes.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } } if d.HasChange("ssl_mode") { - input.SslMode = aws.String(d.Get("ssl_mode").(string)) + input.SslMode = awstypes.DmsSslModeValue(d.Get("ssl_mode").(string)) } switch engineName := d.Get("engine_name").(string); engineName { @@ -1107,16 +1108,16 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) @@ -1130,17 +1131,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ + input.PostgreSQLSettings = &awstypes.PostgreSQLSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ + input.PostgreSQLSettings = &awstypes.PostgreSQLSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), 
ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') @@ -1151,7 +1152,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } case engineNameDynamoDB: if d.HasChange("service_access_role") { - input.DynamoDbSettings = &dms.DynamoDbSettings{ + input.DynamoDbSettings = &awstypes.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } } @@ -1162,11 +1163,11 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "elasticsearch_settings.0.full_load_error_percentage", "elasticsearch_settings.0.service_access_role_arn", "elasticsearch_settings.0.use_new_mapping_type") { - input.ElasticsearchSettings = &dms.ElasticsearchSettings{ + input.ElasticsearchSettings = &awstypes.ElasticsearchSettings{ ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), - ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), - FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + ErrorRetryDuration: aws.Int32(int32(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), + FullLoadErrorPercentage: aws.Int32(int32(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } input.EngineName = aws.String(engineName) @@ -1188,31 +1189,31 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "mongodb_settings.0.docs_to_investigate", "mongodb_settings.0.auth_source", "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MongoDbSettings = &dms.MongoDbSettings{ + input.MongoDbSettings = &awstypes.MongoDbSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), KmsKeyId: aws.String(d.Get(names.AttrKMSKeyARN).(string)), - AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), + AuthType: awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)), + AuthMechanism: awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)), + NestingLevel: awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)), ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), } } else { - input.MongoDbSettings = &dms.MongoDbSettings{ + input.MongoDbSettings = &awstypes.MongoDbSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: 
aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), KmsKeyId: aws.String(d.Get(names.AttrKMSKeyARN).(string)), - AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), + AuthType: awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)), + AuthMechanism: awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)), + NestingLevel: awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)), ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), @@ -1228,17 +1229,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'oracle') @@ -1258,17 +1259,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "redshift_settings", "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.RedshiftSettings = &dms.RedshiftSettings{ + input.RedshiftSettings = &awstypes.RedshiftSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.RedshiftSettings = &dms.RedshiftSettings{ + input.RedshiftSettings = &awstypes.RedshiftSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'redshift') @@ -1288,7 +1289,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { - input.RedshiftSettings.EncryptionMode = aws.String(v) + input.RedshiftSettings.EncryptionMode = 
awstypes.EncryptionModeValue(v) } if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { @@ -1306,17 +1307,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') @@ -1330,17 +1331,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') @@ -1354,17 +1355,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: 
aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'db2') @@ -1388,7 +1389,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if d.HasChange(names.AttrPort) { - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) } if d.HasChange("server_name") { @@ -1400,7 +1401,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } } - _, err := conn.ModifyEndpointWithContext(ctx, input) + _, err := conn.ModifyEndpoint(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Endpoint (%s): %s", d.Id(), err) @@ -1419,14 +1420,14 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceEndpointDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Endpoint: (%s)", d.Id()) - _, err := conn.DeleteEndpointWithContext(ctx, &dms.DeleteEndpointInput{ + _, err := conn.DeleteEndpoint(ctx, &dms.DeleteEndpointInput{ EndpointArn: aws.String(d.Get("endpoint_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -1434,7 +1435,7 @@ func resourceEndpointDelete(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "deleting DMS Endpoint (%s): %s", d.Id(), err) } - if err = waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DMS Endpoint (%s) delete: %s", d.Id(), err) } @@ -1527,18 +1528,18 @@ func validateSSEKMSKey(settingsAttrName string, d *schema.ResourceDiff) error { return nil } -func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) error { - d.SetId(aws.StringValue(endpoint.EndpointIdentifier)) +func resourceEndpointSetState(d *schema.ResourceData, endpoint *awstypes.Endpoint) error { + d.SetId(aws.ToString(endpoint.EndpointIdentifier)) d.Set(names.AttrCertificateARN, endpoint.CertificateArn) d.Set("endpoint_arn", endpoint.EndpointArn) d.Set("endpoint_id", endpoint.EndpointIdentifier) // For some reason the AWS API only accepts lowercase type but returns it as uppercase - d.Set(names.AttrEndpointType, strings.ToLower(aws.StringValue(endpoint.EndpointType))) + d.Set(names.AttrEndpointType, strings.ToLower(string(endpoint.EndpointType))) d.Set("engine_name", endpoint.EngineName) d.Set("extra_connection_attributes", endpoint.ExtraConnectionAttributes) - switch aws.StringValue(endpoint.EngineName) { + switch aws.ToString(endpoint.EngineName) { case engineNameAurora, engineNameMariadb, engineNameMySQL: if endpoint.MySQLSettings != nil { d.Set(names.AttrUsername, endpoint.MySQLSettings.Username) @@ -1687,15 +1688,16 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) er return nil } -func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) error { +func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.Client, arn string) error { tasks, err := 
findReplicationTasksByEndpointARN(ctx, conn, arn) + if err != nil { return err } for _, task := range tasks { - rtID := aws.StringValue(task.ReplicationTaskIdentifier) - switch aws.StringValue(task.Status) { + rtID := aws.ToString(task.ReplicationTaskIdentifier) + switch aws.ToString(task.Status) { case replicationTaskStatusRunning, replicationTaskStatusFailed, replicationTaskStatusReady, replicationTaskStatusStopped: continue case replicationTaskStatusCreating, replicationTaskStatusDeleting, replicationTaskStatusModifying, replicationTaskStatusStopping, replicationTaskStatusStarting: @@ -1708,20 +1710,21 @@ func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigra return nil } -func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) ([]*dms.ReplicationTask, error) { +func stopEndpointReplicationTasks(ctx context.Context, conn *dms.Client, arn string) ([]awstypes.ReplicationTask, error) { if err := steadyEndpointReplicationTasks(ctx, conn, arn); err != nil { return nil, err } tasks, err := findReplicationTasksByEndpointARN(ctx, conn, arn) + if err != nil { return nil, err } - var stoppedTasks []*dms.ReplicationTask + var stoppedTasks []awstypes.ReplicationTask for _, task := range tasks { - rtID := aws.StringValue(task.ReplicationTaskIdentifier) - switch aws.StringValue(task.Status) { + rtID := aws.ToString(task.ReplicationTaskIdentifier) + switch aws.ToString(task.Status) { case replicationTaskStatusRunning: err := stopReplicationTask(ctx, conn, rtID) @@ -1737,7 +1740,9 @@ func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrati return stoppedTasks, nil } -func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, tasks []*dms.ReplicationTask) error { +func startEndpointReplicationTasks(ctx context.Context, conn *dms.Client, arn string, tasks []awstypes.ReplicationTask) error { + const maxConnTestWaitTime = 120 * time.Second + if len(tasks) == 0 { return nil } @@ -1747,12 +1752,12 @@ func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat } for _, task := range tasks { - _, err := conn.TestConnectionWithContext(ctx, &dms.TestConnectionInput{ + _, err := conn.TestConnection(ctx, &dms.TestConnectionInput{ EndpointArn: aws.String(arn), ReplicationInstanceArn: task.ReplicationInstanceArn, }) - if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "already being tested") { + if errs.IsAErrorMessageContains[*awstypes.InvalidResourceStateFault](err, "already being tested") { continue } @@ -1760,20 +1765,22 @@ func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat return fmt.Errorf("testing connection: %w", err) } - err = conn.WaitUntilTestConnectionSucceedsWithContext(ctx, &dms.DescribeConnectionsInput{ - Filters: []*dms.Filter{ + waiter := dms.NewTestConnectionSucceedsWaiter(conn) + + err = waiter.Wait(ctx, &dms.DescribeConnectionsInput{ + Filters: []awstypes.Filter{ { Name: aws.String("endpoint-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }, }, - }) + }, maxConnTestWaitTime) if err != nil { return fmt.Errorf("waiting until test connection succeeds: %w", err) } - if err := startReplicationTask(ctx, conn, aws.StringValue(task.ReplicationTaskIdentifier)); err != nil { + if err := startReplicationTask(ctx, conn, aws.ToString(task.ReplicationTaskIdentifier)); err != nil { return fmt.Errorf("starting replication task: %w", err) } } @@ -1781,12 +1788,12 @@ func 
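startEndpointReplicationTasks above replaces WaitUntilTestConnectionSucceedsWithContext with the generated v2 waiter. A minimal sketch of that waiter pattern in isolation, using the same 120-second cap the diff defines as maxConnTestWaitTime; the wrapper function name is illustrative only.

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

func waitTestConnectionSucceeds(ctx context.Context, client *dms.Client, endpointARN string) error {
	const maxWait = 120 * time.Second // mirrors maxConnTestWaitTime above

	// v2 waiters are constructed from the client and take an explicit maximum
	// wait duration, rather than exposing a WaitUntil* method on the client.
	waiter := dms.NewTestConnectionSucceedsWaiter(client)

	return waiter.Wait(ctx, &dms.DescribeConnectionsInput{
		Filters: []awstypes.Filter{{
			Name:   aws.String("endpoint-arn"),
			Values: []string{endpointARN},
		}},
	}, maxWait)
}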
startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat return nil } -func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) ([]*dms.ReplicationTask, error) { +func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.Client, arn string) ([]awstypes.ReplicationTask, error) { input := &dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("endpoint-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }, }, } @@ -1794,28 +1801,28 @@ func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMi return findReplicationTasks(ctx, conn, input) } -func flattenOpenSearchSettings(settings *dms.ElasticsearchSettings) []map[string]interface{} { +func flattenOpenSearchSettings(settings *awstypes.ElasticsearchSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "endpoint_uri": aws.StringValue(settings.EndpointUri), - "error_retry_duration": aws.Int64Value(settings.ErrorRetryDuration), - "full_load_error_percentage": aws.Int64Value(settings.FullLoadErrorPercentage), - "service_access_role_arn": aws.StringValue(settings.ServiceAccessRoleArn), - "use_new_mapping_type": aws.BoolValue(settings.UseNewMappingType), + "endpoint_uri": aws.ToString(settings.EndpointUri), + "error_retry_duration": aws.ToInt32(settings.ErrorRetryDuration), + "full_load_error_percentage": aws.ToInt32(settings.FullLoadErrorPercentage), + "service_access_role_arn": aws.ToString(settings.ServiceAccessRoleArn), + "use_new_mapping_type": aws.ToBool(settings.UseNewMappingType), } return []map[string]interface{}{m} } -func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { +func expandKafkaSettings(tfMap map[string]interface{}) *awstypes.KafkaSettings { if tfMap == nil { return nil } - apiObject := &dms.KafkaSettings{} + apiObject := &awstypes.KafkaSettings{} if v, ok := tfMap["broker"].(string); ok && v != "" { apiObject.Broker = aws.String(v) @@ -1842,11 +1849,11 @@ func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { } if v, ok := tfMap["message_format"].(string); ok && v != "" { - apiObject.MessageFormat = aws.String(v) + apiObject.MessageFormat = awstypes.MessageFormatValue(v) } if v, ok := tfMap["message_max_bytes"].(int); ok && v != 0 { - apiObject.MessageMaxBytes = aws.Int64(int64(v)) + apiObject.MessageMaxBytes = aws.Int32(int32(v)) } if v, ok := tfMap["no_hex_prefix"].(bool); ok { @@ -1866,7 +1873,7 @@ func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { } if v, ok := tfMap["security_protocol"].(string); ok && v != "" { - apiObject.SecurityProtocol = aws.String(v) + apiObject.SecurityProtocol = awstypes.KafkaSecurityProtocol(v) } if v, ok := tfMap["ssl_ca_certificate_arn"].(string); ok && v != "" { @@ -1892,7 +1899,7 @@ func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { return apiObject } -func flattenKafkaSettings(apiObject *dms.KafkaSettings) map[string]interface{} { +func flattenKafkaSettings(apiObject *awstypes.KafkaSettings) map[string]interface{} { if apiObject == nil { return nil } @@ -1900,86 +1907,82 @@ func flattenKafkaSettings(apiObject *dms.KafkaSettings) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Broker; v != nil { - tfMap["broker"] = aws.StringValue(v) + tfMap["broker"] = aws.ToString(v) } if v := apiObject.IncludeControlDetails; v != nil { - 
tfMap["include_control_details"] = aws.BoolValue(v) + tfMap["include_control_details"] = aws.ToBool(v) } if v := apiObject.IncludeNullAndEmpty; v != nil { - tfMap["include_null_and_empty"] = aws.BoolValue(v) + tfMap["include_null_and_empty"] = aws.ToBool(v) } if v := apiObject.IncludePartitionValue; v != nil { - tfMap["include_partition_value"] = aws.BoolValue(v) + tfMap["include_partition_value"] = aws.ToBool(v) } if v := apiObject.IncludeTableAlterOperations; v != nil { - tfMap["include_table_alter_operations"] = aws.BoolValue(v) + tfMap["include_table_alter_operations"] = aws.ToBool(v) } if v := apiObject.IncludeTransactionDetails; v != nil { - tfMap["include_transaction_details"] = aws.BoolValue(v) + tfMap["include_transaction_details"] = aws.ToBool(v) } - if v := apiObject.MessageFormat; v != nil { - tfMap["message_format"] = aws.StringValue(v) - } + tfMap["message_format"] = string(apiObject.MessageFormat) if v := apiObject.MessageMaxBytes; v != nil { - tfMap["message_max_bytes"] = aws.Int64Value(v) + tfMap["message_max_bytes"] = aws.ToInt32(v) } if v := apiObject.NoHexPrefix; v != nil { - tfMap["no_hex_prefix"] = aws.BoolValue(v) + tfMap["no_hex_prefix"] = aws.ToBool(v) } if v := apiObject.PartitionIncludeSchemaTable; v != nil { - tfMap["partition_include_schema_table"] = aws.BoolValue(v) + tfMap["partition_include_schema_table"] = aws.ToBool(v) } if v := apiObject.SaslPassword; v != nil { - tfMap["sasl_password"] = aws.StringValue(v) + tfMap["sasl_password"] = aws.ToString(v) } if v := apiObject.SaslUsername; v != nil { - tfMap["sasl_username"] = aws.StringValue(v) + tfMap["sasl_username"] = aws.ToString(v) } - if v := apiObject.SecurityProtocol; v != nil { - tfMap["security_protocol"] = aws.StringValue(v) - } + tfMap["security_protocol"] = string(apiObject.SecurityProtocol) if v := apiObject.SslCaCertificateArn; v != nil { - tfMap["ssl_ca_certificate_arn"] = aws.StringValue(v) + tfMap["ssl_ca_certificate_arn"] = aws.ToString(v) } if v := apiObject.SslClientCertificateArn; v != nil { - tfMap["ssl_client_certificate_arn"] = aws.StringValue(v) + tfMap["ssl_client_certificate_arn"] = aws.ToString(v) } if v := apiObject.SslClientKeyArn; v != nil { - tfMap["ssl_client_key_arn"] = aws.StringValue(v) + tfMap["ssl_client_key_arn"] = aws.ToString(v) } if v := apiObject.SslClientKeyPassword; v != nil { - tfMap["ssl_client_key_password"] = aws.StringValue(v) + tfMap["ssl_client_key_password"] = aws.ToString(v) } if v := apiObject.Topic; v != nil { - tfMap["topic"] = aws.StringValue(v) + tfMap["topic"] = aws.ToString(v) } return tfMap } -func expandKinesisSettings(tfMap map[string]interface{}) *dms.KinesisSettings { +func expandKinesisSettings(tfMap map[string]interface{}) *awstypes.KinesisSettings { if tfMap == nil { return nil } - apiObject := &dms.KinesisSettings{} + apiObject := &awstypes.KinesisSettings{} if v, ok := tfMap["include_control_details"].(bool); ok { apiObject.IncludeControlDetails = aws.Bool(v) @@ -2002,7 +2005,7 @@ func expandKinesisSettings(tfMap map[string]interface{}) *dms.KinesisSettings { } if v, ok := tfMap["message_format"].(string); ok && v != "" { - apiObject.MessageFormat = aws.String(v) + apiObject.MessageFormat = awstypes.MessageFormatValue(v) } if v, ok := tfMap["partition_include_schema_table"].(bool); ok { @@ -2020,7 +2023,7 @@ func expandKinesisSettings(tfMap map[string]interface{}) *dms.KinesisSettings { return apiObject } -func flattenKinesisSettings(apiObject *dms.KinesisSettings) map[string]interface{} { +func flattenKinesisSettings(apiObject 
*awstypes.KinesisSettings) map[string]interface{} { if apiObject == nil { return nil } @@ -2028,79 +2031,77 @@ func flattenKinesisSettings(apiObject *dms.KinesisSettings) map[string]interface tfMap := map[string]interface{}{} if v := apiObject.IncludeControlDetails; v != nil { - tfMap["include_control_details"] = aws.BoolValue(v) + tfMap["include_control_details"] = aws.ToBool(v) } if v := apiObject.IncludeNullAndEmpty; v != nil { - tfMap["include_null_and_empty"] = aws.BoolValue(v) + tfMap["include_null_and_empty"] = aws.ToBool(v) } if v := apiObject.IncludePartitionValue; v != nil { - tfMap["include_partition_value"] = aws.BoolValue(v) + tfMap["include_partition_value"] = aws.ToBool(v) } if v := apiObject.IncludeTableAlterOperations; v != nil { - tfMap["include_table_alter_operations"] = aws.BoolValue(v) + tfMap["include_table_alter_operations"] = aws.ToBool(v) } if v := apiObject.IncludeTransactionDetails; v != nil { - tfMap["include_transaction_details"] = aws.BoolValue(v) + tfMap["include_transaction_details"] = aws.ToBool(v) } - if v := apiObject.MessageFormat; v != nil { - tfMap["message_format"] = aws.StringValue(v) - } + tfMap["message_format"] = string(apiObject.MessageFormat) if v := apiObject.PartitionIncludeSchemaTable; v != nil { - tfMap["partition_include_schema_table"] = aws.BoolValue(v) + tfMap["partition_include_schema_table"] = aws.ToBool(v) } if v := apiObject.ServiceAccessRoleArn; v != nil { - tfMap["service_access_role_arn"] = aws.StringValue(v) + tfMap["service_access_role_arn"] = aws.ToString(v) } if v := apiObject.StreamArn; v != nil { - tfMap[names.AttrStreamARN] = aws.StringValue(v) + tfMap[names.AttrStreamARN] = aws.ToString(v) } return tfMap } -func flattenMongoDBSettings(settings *dms.MongoDbSettings) []map[string]interface{} { +func flattenMongoDBSettings(settings *awstypes.MongoDbSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "auth_type": aws.StringValue(settings.AuthType), - "auth_mechanism": aws.StringValue(settings.AuthMechanism), - "nesting_level": aws.StringValue(settings.NestingLevel), - "extract_doc_id": aws.StringValue(settings.ExtractDocId), - "docs_to_investigate": aws.StringValue(settings.DocsToInvestigate), - "auth_source": aws.StringValue(settings.AuthSource), + "auth_type": string(settings.AuthType), + "auth_mechanism": string(settings.AuthMechanism), + "nesting_level": string(settings.NestingLevel), + "extract_doc_id": aws.ToString(settings.ExtractDocId), + "docs_to_investigate": aws.ToString(settings.DocsToInvestigate), + "auth_source": aws.ToString(settings.AuthSource), } return []map[string]interface{}{m} } -func expandRedisSettings(tfMap map[string]interface{}) *dms.RedisSettings { +func expandRedisSettings(tfMap map[string]interface{}) *awstypes.RedisSettings { if tfMap == nil { return nil } - apiObject := &dms.RedisSettings{} + apiObject := &awstypes.RedisSettings{} if v, ok := tfMap["auth_password"].(string); ok && v != "" { apiObject.AuthPassword = aws.String(v) } if v, ok := tfMap["auth_type"].(string); ok && v != "" { - apiObject.AuthType = aws.String(v) + apiObject.AuthType = awstypes.RedisAuthTypeValue(v) } if v, ok := tfMap["auth_user_name"].(string); ok && v != "" { apiObject.AuthUserName = aws.String(v) } if v, ok := tfMap[names.AttrPort].(int); ok { - apiObject.Port = aws.Int64(int64(v)) + apiObject.Port = int32(v) } if v, ok := tfMap["server_name"].(string); ok && v != "" { apiObject.ServerName = aws.String(v) @@ -2109,13 +2110,13 @@ func 
expandRedisSettings(tfMap map[string]interface{}) *dms.RedisSettings { apiObject.SslCaCertificateArn = aws.String(v) } if v, ok := tfMap["ssl_security_protocol"].(string); ok && v != "" { - apiObject.SslSecurityProtocol = aws.String(v) + apiObject.SslSecurityProtocol = awstypes.SslSecurityProtocolValue(v) } return apiObject } -func flattenRedisSettings(apiObject *dms.RedisSettings) map[string]interface{} { +func flattenRedisSettings(apiObject *awstypes.RedisSettings) map[string]interface{} { if apiObject == nil { return nil } @@ -2123,52 +2124,45 @@ func flattenRedisSettings(apiObject *dms.RedisSettings) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.AuthPassword; v != nil { - tfMap["auth_password"] = aws.StringValue(v) - } - if v := apiObject.AuthType; v != nil { - tfMap["auth_type"] = aws.StringValue(v) + tfMap["auth_password"] = aws.ToString(v) } + tfMap["auth_type"] = string(apiObject.AuthType) if v := apiObject.AuthUserName; v != nil { - tfMap["auth_user_name"] = aws.StringValue(v) - } - if v := apiObject.Port; v != nil { - tfMap[names.AttrPort] = aws.Int64Value(v) + tfMap["auth_user_name"] = aws.ToString(v) } + tfMap[names.AttrPort] = apiObject.Port if v := apiObject.ServerName; v != nil { - tfMap["server_name"] = aws.StringValue(v) + tfMap["server_name"] = aws.ToString(v) } if v := apiObject.SslCaCertificateArn; v != nil { - tfMap["ssl_ca_certificate_arn"] = aws.StringValue(v) - } - if v := apiObject.SslSecurityProtocol; v != nil { - tfMap["ssl_security_protocol"] = aws.StringValue(v) + tfMap["ssl_ca_certificate_arn"] = aws.ToString(v) } - + tfMap["ssl_security_protocol"] = string(apiObject.SslSecurityProtocol) return tfMap } -func flattenRedshiftSettings(settings *dms.RedshiftSettings) []map[string]interface{} { +func flattenRedshiftSettings(settings *awstypes.RedshiftSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "bucket_folder": aws.StringValue(settings.BucketFolder), - names.AttrBucketName: aws.StringValue(settings.BucketName), - "encryption_mode": aws.StringValue(settings.EncryptionMode), - "server_side_encryption_kms_key_id": aws.StringValue(settings.ServerSideEncryptionKmsKeyId), - "service_access_role_arn": aws.StringValue(settings.ServiceAccessRoleArn), + "bucket_folder": aws.ToString(settings.BucketFolder), + names.AttrBucketName: aws.ToString(settings.BucketName), + "encryption_mode": string(settings.EncryptionMode), + "server_side_encryption_kms_key_id": aws.ToString(settings.ServerSideEncryptionKmsKeyId), + "service_access_role_arn": aws.ToString(settings.ServiceAccessRoleArn), } return []map[string]interface{}{m} } -func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSettings { +func expandPostgreSQLSettings(tfMap map[string]interface{}) *awstypes.PostgreSQLSettings { if tfMap == nil { return nil } - apiObject := &dms.PostgreSQLSettings{} + apiObject := &awstypes.PostgreSQLSettings{} if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { apiObject.AfterConnectScript = aws.String(v) @@ -2180,13 +2174,13 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti apiObject.CaptureDdls = aws.Bool(v) } if v, ok := tfMap["database_mode"].(string); ok && v != "" { - apiObject.DatabaseMode = aws.String(v) + apiObject.DatabaseMode = awstypes.DatabaseMode(v) } if v, ok := tfMap["ddl_artifacts_schema"].(string); ok && v != "" { apiObject.DdlArtifactsSchema = aws.String(v) } if v, ok := 
tfMap["execute_timeout"].(int); ok { - apiObject.ExecuteTimeout = aws.Int64(int64(v)) + apiObject.ExecuteTimeout = aws.Int32(int32(v)) } if v, ok := tfMap["fail_tasks_on_lob_truncation"].(bool); ok { apiObject.FailTasksOnLobTruncation = aws.Bool(v) @@ -2195,7 +2189,7 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti apiObject.HeartbeatEnable = aws.Bool(v) } if v, ok := tfMap["heartbeat_frequency"].(int); ok { - apiObject.HeartbeatFrequency = aws.Int64(int64(v)) + apiObject.HeartbeatFrequency = aws.Int32(int32(v)) } if v, ok := tfMap["heartbeat_schema"].(string); ok && v != "" { apiObject.HeartbeatSchema = aws.String(v) @@ -2207,13 +2201,13 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti apiObject.MapJsonbAsClob = aws.Bool(v) } if v, ok := tfMap["map_long_varchar_as"].(string); ok && v != "" { - apiObject.MapLongVarcharAs = aws.String(v) + apiObject.MapLongVarcharAs = awstypes.LongVarcharMappingType(v) } if v, ok := tfMap["max_file_size"].(int); ok { - apiObject.MaxFileSize = aws.Int64(int64(v)) + apiObject.MaxFileSize = aws.Int32(int32(v)) } if v, ok := tfMap["plugin_name"].(string); ok && v != "" { - apiObject.PluginName = aws.String(v) + apiObject.PluginName = awstypes.PluginNameValue(v) } if v, ok := tfMap["slot_name"].(string); ok && v != "" { apiObject.SlotName = aws.String(v) @@ -2222,7 +2216,7 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti return apiObject } -func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) []map[string]interface{} { +func flattenPostgreSQLSettings(apiObject *awstypes.PostgreSQLSettings) []map[string]interface{} { if apiObject == nil { return nil } @@ -2230,63 +2224,57 @@ func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) []map[string]i tfMap := map[string]interface{}{} if v := apiObject.AfterConnectScript; v != nil { - tfMap["after_connect_script"] = aws.StringValue(v) + tfMap["after_connect_script"] = aws.ToString(v) } if v := apiObject.BabelfishDatabaseName; v != nil { - tfMap["babelfish_database_name"] = aws.StringValue(v) + tfMap["babelfish_database_name"] = aws.ToString(v) } if v := apiObject.CaptureDdls; v != nil { - tfMap["capture_ddls"] = aws.BoolValue(v) - } - if v := apiObject.DatabaseMode; v != nil { - tfMap["database_mode"] = aws.StringValue(v) + tfMap["capture_ddls"] = aws.ToBool(v) } + tfMap["database_mode"] = string(apiObject.DatabaseMode) if v := apiObject.DdlArtifactsSchema; v != nil { - tfMap["ddl_artifacts_schema"] = aws.StringValue(v) + tfMap["ddl_artifacts_schema"] = aws.ToString(v) } if v := apiObject.ExecuteTimeout; v != nil { - tfMap["execute_timeout"] = aws.Int64Value(v) + tfMap["execute_timeout"] = aws.ToInt32(v) } if v := apiObject.FailTasksOnLobTruncation; v != nil { - tfMap["fail_tasks_on_lob_truncation"] = aws.BoolValue(v) + tfMap["fail_tasks_on_lob_truncation"] = aws.ToBool(v) } if v := apiObject.HeartbeatEnable; v != nil { - tfMap["heartbeat_enable"] = aws.BoolValue(v) + tfMap["heartbeat_enable"] = aws.ToBool(v) } if v := apiObject.HeartbeatFrequency; v != nil { - tfMap["heartbeat_frequency"] = aws.Int64Value(v) + tfMap["heartbeat_frequency"] = aws.ToInt32(v) } if v := apiObject.HeartbeatSchema; v != nil { - tfMap["heartbeat_schema"] = aws.StringValue(v) + tfMap["heartbeat_schema"] = aws.ToString(v) } if v := apiObject.MapBooleanAsBoolean; v != nil { - tfMap["map_boolean_as_boolean"] = aws.BoolValue(v) + tfMap["map_boolean_as_boolean"] = aws.ToBool(v) } if v := apiObject.MapJsonbAsClob; v != nil { 
- tfMap["map_jsonb_as_clob"] = aws.BoolValue(v) - } - if v := apiObject.MapLongVarcharAs; v != nil { - tfMap["map_long_varchar_as"] = aws.StringValue(v) + tfMap["map_jsonb_as_clob"] = aws.ToBool(v) } + tfMap["map_long_varchar_as"] = string(apiObject.MapLongVarcharAs) if v := apiObject.MaxFileSize; v != nil { - tfMap["max_file_size"] = aws.Int64Value(v) - } - if v := apiObject.PluginName; v != nil { - tfMap["plugin_name"] = aws.StringValue(v) + tfMap["max_file_size"] = aws.ToInt32(v) } + tfMap["plugin_name"] = string(apiObject.PluginName) if v := apiObject.SlotName; v != nil { - tfMap["slot_name"] = aws.StringValue(v) + tfMap["slot_name"] = aws.ToString(v) } return []map[string]interface{}{tfMap} } -func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { +func expandS3Settings(tfMap map[string]interface{}) *awstypes.S3Settings { if tfMap == nil { return nil } - apiObject := &dms.S3Settings{} + apiObject := &awstypes.S3Settings{} if v, ok := tfMap["add_column_name"].(bool); ok { apiObject.AddColumnName = aws.Bool(v) @@ -2298,7 +2286,7 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.BucketName = aws.String(v) } if v, ok := tfMap["canned_acl_for_objects"].(string); ok { - apiObject.CannedAclForObjects = aws.String(v) + apiObject.CannedAclForObjects = awstypes.CannedAclForObjectsValue(v) } if v, ok := tfMap["cdc_inserts_and_updates"].(bool); ok { apiObject.CdcInsertsAndUpdates = aws.Bool(v) @@ -2307,16 +2295,16 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.CdcInsertsOnly = aws.Bool(v) } if v, ok := tfMap["cdc_max_batch_interval"].(int); ok { - apiObject.CdcMaxBatchInterval = aws.Int64(int64(v)) + apiObject.CdcMaxBatchInterval = aws.Int32(int32(v)) } if v, ok := tfMap["cdc_min_file_size"].(int); ok { - apiObject.CdcMinFileSize = aws.Int64(int64(v)) + apiObject.CdcMinFileSize = aws.Int32(int32(v)) } if v, ok := tfMap["cdc_path"].(string); ok { apiObject.CdcPath = aws.String(v) } if v, ok := tfMap["compression_type"].(string); ok { - apiObject.CompressionType = aws.String(v) + apiObject.CompressionType = awstypes.CompressionTypeValue(v) } if v, ok := tfMap["csv_delimiter"].(string); ok { apiObject.CsvDelimiter = aws.String(v) @@ -2331,31 +2319,31 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.CsvRowDelimiter = aws.String(v) } if v, ok := tfMap["data_format"].(string); ok { - apiObject.DataFormat = aws.String(v) + apiObject.DataFormat = awstypes.DataFormatValue(v) } if v, ok := tfMap["data_page_size"].(int); ok { - apiObject.DataPageSize = aws.Int64(int64(v)) + apiObject.DataPageSize = aws.Int32(int32(v)) } if v, ok := tfMap["date_partition_delimiter"].(string); ok { - apiObject.DatePartitionDelimiter = aws.String(v) + apiObject.DatePartitionDelimiter = awstypes.DatePartitionDelimiterValue(v) } if v, ok := tfMap["date_partition_enabled"].(bool); ok { apiObject.DatePartitionEnabled = aws.Bool(v) } if v, ok := tfMap["date_partition_sequence"].(string); ok { - apiObject.DatePartitionSequence = aws.String(v) + apiObject.DatePartitionSequence = awstypes.DatePartitionSequenceValue(v) } if v, ok := tfMap["dict_page_size_limit"].(int); ok { - apiObject.DictPageSizeLimit = aws.Int64(int64(v)) + apiObject.DictPageSizeLimit = aws.Int32(int32(v)) } if v, ok := tfMap["enable_statistics"].(bool); ok { apiObject.EnableStatistics = aws.Bool(v) } if v, ok := tfMap["encoding_type"].(string); ok { - apiObject.EncodingType = aws.String(v) + apiObject.EncodingType = awstypes.EncodingTypeValue(v) } 
if v, ok := tfMap["encryption_mode"].(string); ok { - apiObject.EncryptionMode = aws.String(v) + apiObject.EncryptionMode = awstypes.EncryptionModeValue(v) } if v, ok := tfMap["external_table_definition"].(string); ok { apiObject.ExternalTableDefinition = aws.String(v) @@ -2364,19 +2352,19 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.GlueCatalogGeneration = aws.Bool(v) } if v, ok := tfMap["ignore_header_rows"].(int); ok { - apiObject.IgnoreHeaderRows = aws.Int64(int64(v)) + apiObject.IgnoreHeaderRows = aws.Int32(int32(v)) } if v, ok := tfMap["include_op_for_full_load"].(bool); ok { apiObject.IncludeOpForFullLoad = aws.Bool(v) } if v, ok := tfMap["max_file_size"].(int); ok { - apiObject.MaxFileSize = aws.Int64(int64(v)) + apiObject.MaxFileSize = aws.Int32(int32(v)) } if v, ok := tfMap["parquet_timestamp_in_millisecond"].(bool); ok { apiObject.ParquetTimestampInMillisecond = aws.Bool(v) } if v, ok := tfMap["parquet_version"].(string); ok { - apiObject.ParquetVersion = aws.String(v) + apiObject.ParquetVersion = awstypes.ParquetVersionValue(v) } if v, ok := tfMap["preserve_transactions"].(bool); ok { apiObject.PreserveTransactions = aws.Bool(v) @@ -2385,7 +2373,7 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.Rfc4180 = aws.Bool(v) } if v, ok := tfMap["row_group_length"].(int); ok { - apiObject.RowGroupLength = aws.Int64(int64(v)) + apiObject.RowGroupLength = aws.Int32(int32(v)) } if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok { apiObject.ServerSideEncryptionKmsKeyId = aws.String(v) @@ -2406,7 +2394,7 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { return apiObject } -func flattenS3Settings(apiObject *dms.S3Settings) []map[string]interface{} { +func flattenS3Settings(apiObject *awstypes.S3Settings) []map[string]interface{} { if apiObject == nil { return []map[string]interface{}{} } @@ -2414,115 +2402,99 @@ func flattenS3Settings(apiObject *dms.S3Settings) []map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.AddColumnName; v != nil { - tfMap["add_column_name"] = aws.BoolValue(v) + tfMap["add_column_name"] = aws.ToBool(v) } if v := apiObject.BucketFolder; v != nil { - tfMap["bucket_folder"] = aws.StringValue(v) + tfMap["bucket_folder"] = aws.ToString(v) } if v := apiObject.BucketName; v != nil { - tfMap[names.AttrBucketName] = aws.StringValue(v) - } - if v := apiObject.CannedAclForObjects; v != nil { - tfMap["canned_acl_for_objects"] = aws.StringValue(v) + tfMap[names.AttrBucketName] = aws.ToString(v) } + tfMap["canned_acl_for_objects"] = string(apiObject.CannedAclForObjects) if v := apiObject.CdcInsertsAndUpdates; v != nil { - tfMap["cdc_inserts_and_updates"] = aws.BoolValue(v) + tfMap["cdc_inserts_and_updates"] = aws.ToBool(v) } if v := apiObject.CdcInsertsOnly; v != nil { - tfMap["cdc_inserts_only"] = aws.BoolValue(v) + tfMap["cdc_inserts_only"] = aws.ToBool(v) } if v := apiObject.CdcMaxBatchInterval; v != nil { - tfMap["cdc_max_batch_interval"] = aws.Int64Value(v) + tfMap["cdc_max_batch_interval"] = aws.ToInt32(v) } if v := apiObject.CdcMinFileSize; v != nil { - tfMap["cdc_min_file_size"] = aws.Int64Value(v) + tfMap["cdc_min_file_size"] = aws.ToInt32(v) } if v := apiObject.CdcPath; v != nil { - tfMap["cdc_path"] = aws.StringValue(v) - } - if v := apiObject.CompressionType; v != nil { - tfMap["compression_type"] = aws.StringValue(v) + tfMap["cdc_path"] = aws.ToString(v) } + tfMap["compression_type"] = string(apiObject.CompressionType) if v := 
apiObject.CsvDelimiter; v != nil { - tfMap["csv_delimiter"] = aws.StringValue(v) + tfMap["csv_delimiter"] = aws.ToString(v) } if v := apiObject.CsvNoSupValue; v != nil { - tfMap["csv_no_sup_value"] = aws.StringValue(v) + tfMap["csv_no_sup_value"] = aws.ToString(v) } if v := apiObject.CsvNullValue; v != nil { - tfMap["csv_null_value"] = aws.StringValue(v) + tfMap["csv_null_value"] = aws.ToString(v) } if v := apiObject.CsvRowDelimiter; v != nil { - tfMap["csv_row_delimiter"] = aws.StringValue(v) - } - if v := apiObject.DataFormat; v != nil { - tfMap["data_format"] = aws.StringValue(v) + tfMap["csv_row_delimiter"] = aws.ToString(v) } + tfMap["data_format"] = string(apiObject.DataFormat) if v := apiObject.DataPageSize; v != nil { - tfMap["data_page_size"] = aws.Int64Value(v) - } - if v := apiObject.DatePartitionDelimiter; v != nil { - tfMap["date_partition_delimiter"] = aws.StringValue(v) + tfMap["data_page_size"] = aws.ToInt32(v) } + tfMap["date_partition_delimiter"] = string(apiObject.DatePartitionDelimiter) if v := apiObject.DatePartitionEnabled; v != nil { - tfMap["date_partition_enabled"] = aws.BoolValue(v) - } - if v := apiObject.DatePartitionSequence; v != nil { - tfMap["date_partition_sequence"] = aws.StringValue(v) + tfMap["date_partition_enabled"] = aws.ToBool(v) } + tfMap["date_partition_sequence"] = string(apiObject.DatePartitionSequence) if v := apiObject.DictPageSizeLimit; v != nil { - tfMap["dict_page_size_limit"] = aws.Int64Value(v) + tfMap["dict_page_size_limit"] = aws.ToInt32(v) } if v := apiObject.EnableStatistics; v != nil { - tfMap["enable_statistics"] = aws.BoolValue(v) - } - if v := apiObject.EncodingType; v != nil { - tfMap["encoding_type"] = aws.StringValue(v) - } - if v := apiObject.EncryptionMode; v != nil { - tfMap["encryption_mode"] = aws.StringValue(v) + tfMap["enable_statistics"] = aws.ToBool(v) } + tfMap["encoding_type"] = string(apiObject.EncodingType) + tfMap["encryption_mode"] = string(apiObject.EncryptionMode) if v := apiObject.ExternalTableDefinition; v != nil { - tfMap["external_table_definition"] = aws.StringValue(v) + tfMap["external_table_definition"] = aws.ToString(v) } if v := apiObject.GlueCatalogGeneration; v != nil { - tfMap["glue_catalog_generation"] = aws.BoolValue(v) + tfMap["glue_catalog_generation"] = aws.ToBool(v) } if v := apiObject.IgnoreHeaderRows; v != nil { - tfMap["ignore_header_rows"] = aws.Int64Value(v) + tfMap["ignore_header_rows"] = aws.ToInt32(v) } if v := apiObject.IncludeOpForFullLoad; v != nil { - tfMap["include_op_for_full_load"] = aws.BoolValue(v) + tfMap["include_op_for_full_load"] = aws.ToBool(v) } if v := apiObject.MaxFileSize; v != nil { - tfMap["max_file_size"] = aws.Int64Value(v) + tfMap["max_file_size"] = aws.ToInt32(v) } if v := apiObject.ParquetTimestampInMillisecond; v != nil { - tfMap["parquet_timestamp_in_millisecond"] = aws.BoolValue(v) - } - if v := apiObject.ParquetVersion; v != nil { - tfMap["parquet_version"] = aws.StringValue(v) + tfMap["parquet_timestamp_in_millisecond"] = aws.ToBool(v) } + tfMap["parquet_version"] = string(apiObject.ParquetVersion) if v := apiObject.Rfc4180; v != nil { - tfMap["rfc_4180"] = aws.BoolValue(v) + tfMap["rfc_4180"] = aws.ToBool(v) } if v := apiObject.RowGroupLength; v != nil { - tfMap["row_group_length"] = aws.Int64Value(v) + tfMap["row_group_length"] = aws.ToInt32(v) } if v := apiObject.ServerSideEncryptionKmsKeyId; v != nil { - tfMap["server_side_encryption_kms_key_id"] = aws.StringValue(v) + tfMap["server_side_encryption_kms_key_id"] = aws.ToString(v) } if v := 
apiObject.ServiceAccessRoleArn; v != nil { - tfMap["service_access_role_arn"] = aws.StringValue(v) + tfMap["service_access_role_arn"] = aws.ToString(v) } if v := apiObject.TimestampColumnName; v != nil { - tfMap["timestamp_column_name"] = aws.StringValue(v) + tfMap["timestamp_column_name"] = aws.ToString(v) } if v := apiObject.UseCsvNoSupValue; v != nil { - tfMap["use_csv_no_sup_value"] = aws.BoolValue(v) + tfMap["use_csv_no_sup_value"] = aws.ToBool(v) } if v := apiObject.UseTaskStartTimeForFullLoadTimestamp; v != nil { - tfMap["use_task_start_time_for_full_load_timestamp"] = aws.BoolValue(v) + tfMap["use_task_start_time_for_full_load_timestamp"] = aws.ToBool(v) } return []map[string]interface{}{tfMap} @@ -2620,7 +2592,7 @@ func expandTopLevelConnectionInfo(d *schema.ResourceData, input *dms.CreateEndpo input.Username = aws.String(d.Get(names.AttrUsername).(string)) input.Password = aws.String(d.Get(names.AttrPassword).(string)) input.ServerName = aws.String(d.Get("server_name").(string)) - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) if v, ok := d.GetOk(names.AttrDatabaseName); ok { input.DatabaseName = aws.String(v.(string)) @@ -2631,26 +2603,26 @@ func expandTopLevelConnectionInfoModify(d *schema.ResourceData, input *dms.Modif input.Username = aws.String(d.Get(names.AttrUsername).(string)) input.Password = aws.String(d.Get(names.AttrPassword).(string)) input.ServerName = aws.String(d.Get("server_name").(string)) - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) if v, ok := d.GetOk(names.AttrDatabaseName); ok { input.DatabaseName = aws.String(v.(string)) } } -func flattenTopLevelConnectionInfo(d *schema.ResourceData, endpoint *dms.Endpoint) { +func flattenTopLevelConnectionInfo(d *schema.ResourceData, endpoint *awstypes.Endpoint) { d.Set(names.AttrUsername, endpoint.Username) d.Set("server_name", endpoint.ServerName) d.Set(names.AttrPort, endpoint.Port) d.Set(names.AttrDatabaseName, endpoint.DatabaseName) } -func FindEndpointByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.Endpoint, error) { +func findEndpointByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.Endpoint, error) { input := &dms.DescribeEndpointsInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("endpoint-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -2658,50 +2630,43 @@ func FindEndpointByID(ctx context.Context, conn *dms.DatabaseMigrationService, i return findEndpoint(ctx, conn, input) } -func findEndpoint(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEndpointsInput) (*dms.Endpoint, error) { +func findEndpoint(ctx context.Context, conn *dms.Client, input *dms.DescribeEndpointsInput) (*awstypes.Endpoint, error) { output, err := findEndpoints(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findEndpoints(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEndpointsInput) ([]*dms.Endpoint, error) { - var output []*dms.Endpoint +func findEndpoints(ctx context.Context, conn *dms.Client, input *dms.DescribeEndpointsInput) ([]awstypes.Endpoint, error) { + var output []awstypes.Endpoint - err := conn.DescribeEndpointsPagesWithContext(ctx, input, func(page *dms.DescribeEndpointsOutput, lastPage bool) bool { - if 
page == nil {
-			return !lastPage
-		}
+	pages := dms.NewDescribeEndpointsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)

-		for _, v := range page.Endpoints {
-			if v != nil {
-				output = append(output, v)
+		if errs.IsA[*awstypes.ResourceNotFoundFault](err) {
+			return nil, &retry.NotFoundError{
+				LastError:   err,
+				LastRequest: input,
 			}
 		}

-		return !lastPage
-	})
-
-	if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
+		if err != nil {
+			return nil, err
 		}
-	}

-	if err != nil {
-		return nil, err
+		output = append(output, page.Endpoints...)
 	}

 	return output, nil
 }

-func statusEndpoint(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc {
+func statusEndpoint(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc {
 	return func() (interface{}, string, error) {
-		output, err := FindEndpointByID(ctx, conn, id)
+		output, err := findEndpointByID(ctx, conn, id)

 		if tfresource.NotFound(err) {
 			return nil, "", nil
@@ -2711,11 +2676,11 @@ func statusEndpoint(ctx context.Context, conn *dms.DatabaseMigrationService, id
 			return nil, "", err
 		}

-		return output, aws.StringValue(output.Status), nil
+		return output, aws.ToString(output.Status), nil
 	}
 }

-func waitEndpointDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error {
+func waitEndpointDeleted(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.Endpoint, error) { //nolint:unparam
 	stateConf := &retry.StateChangeConf{
 		Pending: []string{endpointStatusDeleting},
 		Target:  []string{},
@@ -2723,7 +2688,11 @@ func waitEndpointDeleted(ctx context.Context, conn *dms.DatabaseMigrationService
 		Timeout: timeout,
 	}

-	_, err := stateConf.WaitForStateContext(ctx)
+	outputRaw, err := stateConf.WaitForStateContext(ctx)
+
+	if output, ok := outputRaw.(*awstypes.Endpoint); ok {
+		return output, err
+	}

-	return err
+	return nil, err
 }
diff --git a/internal/service/dms/endpoint_data_source.go b/internal/service/dms/endpoint_data_source.go
index f6391acd9d4..89f27a6d771 100644
--- a/internal/service/dms/endpoint_data_source.go
+++ b/internal/service/dms/endpoint_data_source.go
@@ -6,7 +6,7 @@ package dms
 import (
 	"context"

-	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
@@ -15,8 +15,8 @@ import (
 	"github.com/hashicorp/terraform-provider-aws/names"
 )

-// @SDKDataSource("aws_dms_endpoint")
-func DataSourceEndpoint() *schema.Resource {
+// @SDKDataSource("aws_dms_endpoint", name="Endpoint")
+func dataSourceEndpoint() *schema.Resource {
 	return &schema.Resource{
 		ReadWithoutTimeout: dataSourceEndpointRead,
@@ -572,20 +572,20 @@ func DataSourceEndpoint() *schema.Resource {
 func dataSourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics

-	conn := meta.(*conns.AWSClient).DMSConn(ctx)
+	conn := meta.(*conns.AWSClient).DMSClient(ctx)
 	defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig
 	ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig

 	endptID := d.Get("endpoint_id").(string)
-	out, err := FindEndpointByID(ctx, conn, endptID)
+	out, err := findEndpointByID(ctx, conn, endptID)

 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "reading DMS Endpoint
(%s): %s", endptID, err) } - d.SetId(aws.StringValue(out.EndpointIdentifier)) + d.SetId(aws.ToString(out.EndpointIdentifier)) d.Set("endpoint_id", out.EndpointIdentifier) - arn := aws.StringValue(out.EndpointArn) + arn := aws.ToString(out.EndpointArn) d.Set("endpoint_arn", arn) d.Set(names.AttrEndpointType, out.EndpointType) d.Set(names.AttrDatabaseName, out.DatabaseName) @@ -600,6 +600,7 @@ func dataSourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta in } tags, err := listTags(ctx, conn, arn) + if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for DMS Endpoint (%s): %s", arn, err) } diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 24c3d91ba1d..49afff18070 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -2212,7 +2212,7 @@ func TestAccDMSEndpoint_pauseReplicationTasks(t *testing.T) { endpointNameSource := "aws_dms_endpoint.source" endpointNameTarget := "aws_dms_endpoint.target" replicationTaskName := "aws_dms_replication_task.test" - var task dms.ReplicationTask + var task awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -2253,7 +2253,7 @@ func testAccCheckResourceAttrRegionalHostname(resourceName, attributeName, servi func testAccCheckEndpointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dms_endpoint" { @@ -2284,11 +2284,7 @@ func testAccCheckEndpointExists(ctx context.Context, n string) resource.TestChec return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No DMS Endpoint ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindEndpointByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index a966dd5d7d2..d675a58a0ca 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -9,15 +9,16 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,7 +29,7 @@ import ( // @SDKResource("aws_dms_event_subscription", name="Event Subscription") // @Tags(identifierAttribute="arn") -func ResourceEventSubscription() *schema.Resource { +func resourceEventSubscription() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceEventSubscriptionCreate, ReadWithoutTimeout: resourceEventSubscriptionRead, @@ -96,12 +97,12 @@ func ResourceEventSubscription() *schema.Resource { func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) name := d.Get(names.AttrName).(string) input := &dms.CreateEventSubscriptionInput{ Enabled: aws.Bool(d.Get(names.AttrEnabled).(bool)), - EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), + EventCategories: flex.ExpandStringValueSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get(names.AttrSNSTopicARN).(string)), SourceType: aws.String(d.Get(names.AttrSourceType).(string)), SubscriptionName: aws.String(name), @@ -109,10 +110,10 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("source_ids"); ok && v.(*schema.Set).Len() > 0 { - input.SourceIds = flex.ExpandStringSet(v.(*schema.Set)) + input.SourceIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.CreateEventSubscriptionWithContext(ctx, input) + _, err := conn.CreateEventSubscription(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Event Subscription (%s): %s", name, err) @@ -129,9 +130,9 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - subscription, err := FindEventSubscriptionByName(ctx, conn, d.Id()) + subscription, err := findEventSubscriptionByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Event Subscription (%s) not found, removing from state", d.Id()) @@ -152,10 +153,10 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, }.String() d.Set(names.AttrARN, arn) d.Set(names.AttrEnabled, subscription.Enabled) - d.Set("event_categories", aws.StringValueSlice(subscription.EventCategoriesList)) + d.Set("event_categories", subscription.EventCategoriesList) d.Set(names.AttrName, d.Id()) d.Set(names.AttrSNSTopicARN, subscription.SnsTopicArn) - d.Set("source_ids", aws.StringValueSlice(subscription.SourceIdsList)) + d.Set("source_ids", subscription.SourceIdsList) d.Set(names.AttrSourceType, subscription.SourceType) return diags @@ -163,18 +164,18 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) 
{ input := &dms.ModifyEventSubscriptionInput{ Enabled: aws.Bool(d.Get(names.AttrEnabled).(bool)), - EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), + EventCategories: flex.ExpandStringValueSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get(names.AttrSNSTopicARN).(string)), SourceType: aws.String(d.Get(names.AttrSourceType).(string)), SubscriptionName: aws.String(d.Id()), } - _, err := conn.ModifyEventSubscriptionWithContext(ctx, input) + _, err := conn.ModifyEventSubscription(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DMS Event Subscription (%s): %s", d.Id(), err) @@ -190,14 +191,14 @@ func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Event Subscription: %s", d.Id()) - _, err := conn.DeleteEventSubscriptionWithContext(ctx, &dms.DeleteEventSubscriptionInput{ + _, err := conn.DeleteEventSubscription(ctx, &dms.DeleteEventSubscriptionInput{ SubscriptionName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -212,7 +213,7 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData return diags } -func FindEventSubscriptionByName(ctx context.Context, conn *dms.DatabaseMigrationService, name string) (*dms.EventSubscription, error) { +func findEventSubscriptionByName(ctx context.Context, conn *dms.Client, name string) (*awstypes.EventSubscription, error) { input := &dms.DescribeEventSubscriptionsInput{ SubscriptionName: aws.String(name), } @@ -220,50 +221,44 @@ func FindEventSubscriptionByName(ctx context.Context, conn *dms.DatabaseMigratio return findEventSubscription(ctx, conn, input) } -func findEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) (*dms.EventSubscription, error) { +func findEventSubscription(ctx context.Context, conn *dms.Client, input *dms.DescribeEventSubscriptionsInput) (*awstypes.EventSubscription, error) { output, err := findEventSubscriptions(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findEventSubscriptions(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) ([]*dms.EventSubscription, error) { - var output []*dms.EventSubscription +func findEventSubscriptions(ctx context.Context, conn *dms.Client, input *dms.DescribeEventSubscriptionsInput) ([]awstypes.EventSubscription, error) { + var output []awstypes.EventSubscription - err := conn.DescribeEventSubscriptionsPagesWithContext(ctx, input, func(page *dms.DescribeEventSubscriptionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeEventSubscriptionsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.EventSubscriptionsList { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, 
dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.EventSubscriptionsList...) } return output, nil } -func statusEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { +func statusEventSubscription(ctx context.Context, conn *dms.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindEventSubscriptionByName(ctx, conn, name) + output, err := findEventSubscriptionByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", nil @@ -273,11 +268,11 @@ func statusEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationSer return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func waitEventSubscriptionCreated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { +func waitEventSubscriptionCreated(ctx context.Context, conn *dms.Client, name string, timeout time.Duration) (*awstypes.EventSubscription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eventSubscriptionStatusCreating, eventSubscriptionStatusModifying}, Target: []string{eventSubscriptionStatusActive}, @@ -289,14 +284,14 @@ func waitEventSubscriptionCreated(ctx context.Context, conn *dms.DatabaseMigrati outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.EventSubscription); ok { + if output, ok := outputRaw.(*awstypes.EventSubscription); ok { return output, err } return nil, err } -func waitEventSubscriptionUpdated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { +func waitEventSubscriptionUpdated(ctx context.Context, conn *dms.Client, name string, timeout time.Duration) (*awstypes.EventSubscription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eventSubscriptionStatusModifying}, Target: []string{eventSubscriptionStatusActive}, @@ -308,14 +303,14 @@ func waitEventSubscriptionUpdated(ctx context.Context, conn *dms.DatabaseMigrati outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.EventSubscription); ok { + if output, ok := outputRaw.(*awstypes.EventSubscription); ok { return output, err } return nil, err } -func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { +func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.Client, name string, timeout time.Duration) (*awstypes.EventSubscription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eventSubscriptionStatusDeleting}, Target: []string{}, @@ -327,7 +322,7 @@ func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.DatabaseMigrati outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.EventSubscription); ok { + if output, ok := outputRaw.(*awstypes.EventSubscription); ok { return output, err } diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index 7bdef14fbe4..d725bd5817d 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - dms 
"github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccDMSEventSubscription_basic(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" snsTopicResourceName := "aws_sns_topic.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -57,7 +57,7 @@ func TestAccDMSEventSubscription_basic(t *testing.T) { func TestAccDMSEventSubscription_disappears(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -81,7 +81,7 @@ func TestAccDMSEventSubscription_disappears(t *testing.T) { func TestAccDMSEventSubscription_enabled(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -123,7 +123,7 @@ func TestAccDMSEventSubscription_enabled(t *testing.T) { func TestAccDMSEventSubscription_eventCategories(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -164,7 +164,7 @@ func TestAccDMSEventSubscription_eventCategories(t *testing.T) { func TestAccDMSEventSubscription_tags(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_event_subscription.test" @@ -215,7 +215,7 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) @@ -234,14 +234,14 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec } } -func testAccCheckEventSubscriptionExists(ctx context.Context, n string, v *dms.EventSubscription) resource.TestCheckFunc { +func testAccCheckEventSubscriptionExists(ctx context.Context, n string, v *awstypes.EventSubscription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) output, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/exports_test.go b/internal/service/dms/exports_test.go index 89008c13954..ac96699f63f 100644 --- a/internal/service/dms/exports_test.go +++ b/internal/service/dms/exports_test.go @@ -5,9 +5,25 @@ package dms // Exports for use in tests only. 
 var (
-	TaskSettingsEqual             = taskSettingsEqual
-	ValidEndpointID               = validEndpointID
-	ValidReplicationInstanceID    = validReplicationInstanceID
-	ValidReplicationSubnetGroupID = validReplicationSubnetGroupID
-	ValidReplicationTaskID        = validReplicationTaskID
+	ResourceCertificate            = resourceCertificate
+	ResourceEndpoint               = resourceEndpoint
+	ResourceEventSubscription      = resourceEventSubscription
+	ResourceReplicationConfig      = resourceReplicationConfig
+	ResourceReplicationInstance    = resourceReplicationInstance
+	ResourceReplicationSubnetGroup = resourceReplicationSubnetGroup
+	ResourceReplicationTask        = resourceReplicationTask
+	ResourceS3Endpoint             = resourceS3Endpoint
+
+	FindCertificateByID            = findCertificateByID
+	FindEndpointByID               = findEndpointByID
+	FindEventSubscriptionByName    = findEventSubscriptionByName
+	FindReplicationConfigByARN     = findReplicationConfigByARN
+	FindReplicationInstanceByID    = findReplicationInstanceByID
+	FindReplicationSubnetGroupByID = findReplicationSubnetGroupByID
+	FindReplicationTaskByID        = findReplicationTaskByID
+	TaskSettingsEqual              = taskSettingsEqual
+	ValidEndpointID                = validEndpointID
+	ValidReplicationInstanceID     = validReplicationInstanceID
+	ValidReplicationSubnetGroupID  = validReplicationSubnetGroupID
+	ValidReplicationTaskID         = validReplicationTaskID
 )
diff --git a/internal/service/dms/generate.go b/internal/service/dms/generate.go
index a9da99dfa6b..26422fa810d 100644
--- a/internal/service/dms/generate.go
+++ b/internal/service/dms/generate.go
@@ -1,7 +1,7 @@
 // Copyright (c) HashiCorp, Inc.
 // SPDX-License-Identifier: MPL-2.0

-//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=AddTagsToResource -UntagOp=RemoveTagsFromResource -UpdateTags
+//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=AddTagsToResource -UntagOp=RemoveTagsFromResource -UpdateTags
 //go:generate go run ../../generate/servicepackage/main.go
 // ONLY generate directives and package declaration! Do not add anything else to this file.
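Editor's note (illustrative, not part of the patch): the replication_config.go hunks that follow repeat the same AWS SDK for Go v2 mechanics used throughout this change set — a typed paginator instead of the v1 *PagesWithContext callback, typed fault values instead of error-code strings, and value slices instead of pointer slices. A minimal standalone sketch of that pattern against the public DMS v2 API; the function and identifier names below are invented for illustration, and the provider's internal helpers (errs.IsA, retry.NotFoundError, tfresource) are deliberately omitted.

```go
// Illustrative sketch only — not part of this patch.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

// listEndpointsByID collects all endpoints matching an endpoint-id filter,
// draining every page with the v2 paginator instead of the v1
// DescribeEndpointsPagesWithContext callback.
func listEndpointsByID(ctx context.Context, client *dms.Client, id string) ([]awstypes.Endpoint, error) {
	input := &dms.DescribeEndpointsInput{
		Filters: []awstypes.Filter{{
			Name:   aws.String("endpoint-id"),
			Values: []string{id}, // plain string slice; no aws.StringSlice in v2
		}},
	}

	var out []awstypes.Endpoint
	pages := dms.NewDescribeEndpointsPaginator(client, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)

		// v2 surfaces typed faults, so errors.As replaces string comparison
		// against dms.ErrCodeResourceNotFoundFault.
		var nf *awstypes.ResourceNotFoundFault
		if errors.As(err, &nf) {
			return nil, fmt.Errorf("endpoint %q not found: %w", id, err)
		}
		if err != nil {
			return nil, err
		}

		out = append(out, page.Endpoints...) // values, not pointers, in v2
	}
	return out, nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	eps, err := listEndpointsByID(ctx, dms.NewFromConfig(cfg), "example-endpoint")
	if err != nil {
		log.Fatal(err)
	}
	for _, ep := range eps {
		fmt.Println(aws.ToString(ep.EndpointIdentifier), aws.ToString(ep.Status))
	}
}
```

The design point the patch leans on: NextPage returns the typed fault directly, so a single errors.As check (or the provider's generic errs.IsA wrapper) replaces error-code string matching.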
diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index a3646109b68..734cdb42c83 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -10,14 +10,16 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -29,7 +31,7 @@ import ( // @SDKResource("aws_dms_replication_config", name="Replication Config") // @Tags(identifierAttribute="id") -func ResourceReplicationConfig() *schema.Resource { +func resourceReplicationConfig() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationConfigCreate, ReadWithoutTimeout: resourceReplicationConfigRead, @@ -127,9 +129,9 @@ func ResourceReplicationConfig() *schema.Resource { DiffSuppressOnRefresh: true, }, "replication_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.MigrationTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.MigrationTypeValue](), }, "resource_identifier": { Type: schema.TypeString, @@ -174,12 +176,12 @@ func ResourceReplicationConfig() *schema.Resource { func resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) replicationConfigID := d.Get("replication_config_identifier").(string) input := &dms.CreateReplicationConfigInput{ ReplicationConfigIdentifier: aws.String(replicationConfigID), - ReplicationType: aws.String(d.Get("replication_type").(string)), + ReplicationType: awstypes.MigrationTypeValue(d.Get("replication_type").(string)), SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), TableMappings: aws.String(d.Get("table_mappings").(string)), Tags: getTagsIn(ctx), @@ -202,13 +204,13 @@ func resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData input.SupplementalSettings = aws.String(v.(string)) } - output, err := conn.CreateReplicationConfigWithContext(ctx, input) + output, err := conn.CreateReplicationConfig(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Config (%s): %s", replicationConfigID, err) } - d.SetId(aws.StringValue(output.ReplicationConfig.ReplicationConfigArn)) + d.SetId(aws.ToString(output.ReplicationConfig.ReplicationConfigArn)) if d.Get("start_replication").(bool) { if err := startReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -221,9 +223,9 @@ func 
resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - replicationConfig, err := FindReplicationConfigByARN(ctx, conn, d.Id()) + replicationConfig, err := findReplicationConfigByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Config (%s) not found, removing from state", d.Id()) @@ -252,7 +254,7 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll, "start_replication") { if err := stopReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -274,7 +276,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("replication_type") { - input.ReplicationType = aws.String(d.Get("replication_type").(string)) + input.ReplicationType = awstypes.MigrationTypeValue(d.Get("replication_type").(string)) } if d.HasChange("source_endpoint_arn") { @@ -293,7 +295,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData input.TargetEndpointArn = aws.String(d.Get("target_endpoint_arn").(string)) } - _, err := conn.ModifyReplicationConfigWithContext(ctx, input) + _, err := conn.ModifyReplicationConfig(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Config (%s): %s", d.Id(), err) @@ -307,7 +309,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("start_replication") { - var f func(context.Context, *dms.DatabaseMigrationService, string, time.Duration) error + var f func(context.Context, *dms.Client, string, time.Duration) error if d.Get("start_replication").(bool) { f = startReplication } else { @@ -323,18 +325,18 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData func resourceReplicationConfigDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if err := stopReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting DMS Replication Config: %s", d.Id()) - _, err := conn.DeleteReplicationConfigWithContext(ctx, &dms.DeleteReplicationConfigInput{ + _, err := conn.DeleteReplicationConfig(ctx, &dms.DeleteReplicationConfigInput{ ReplicationConfigArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -349,111 +351,99 @@ func resourceReplicationConfigDelete(ctx context.Context, d *schema.ResourceData return diags } -func FindReplicationConfigByARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) (*dms.ReplicationConfig, error) { +func findReplicationConfigByARN(ctx context.Context, conn *dms.Client, arn string) (*awstypes.ReplicationConfig, error) { input := 
&dms.DescribeReplicationConfigsInput{ - Filters: []*dms.Filter{{ + Filters: []awstypes.Filter{{ Name: aws.String("replication-config-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }}, } return findReplicationConfig(ctx, conn, input) } -func findReplicationConfig(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationConfigsInput) (*dms.ReplicationConfig, error) { +func findReplicationConfig(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationConfigsInput) (*awstypes.ReplicationConfig, error) { output, err := findReplicationConfigs(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationConfigs(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationConfigsInput) ([]*dms.ReplicationConfig, error) { - var output []*dms.ReplicationConfig +func findReplicationConfigs(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationConfigsInput) ([]awstypes.ReplicationConfig, error) { + var output []awstypes.ReplicationConfig - err := conn.DescribeReplicationConfigsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationConfigsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationConfigs { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationConfigs...) 
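The hunk above replaces the v1 `DescribeReplicationConfigsPagesWithContext` callback with the v2 paginator. A minimal, self-contained sketch of that pattern using only the public SDK v2 API (the client construction, the example ARN, and the error translation are illustrative; the provider uses its internal errs/tfresource helpers instead of errors.As and fmt.Errorf):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

// listReplicationConfigs drains every page of DescribeReplicationConfigs,
// returning value-typed structs ([]awstypes.ReplicationConfig) as the v2 SDK does.
func listReplicationConfigs(ctx context.Context, conn *dms.Client, arn string) ([]awstypes.ReplicationConfig, error) {
	input := &dms.DescribeReplicationConfigsInput{
		Filters: []awstypes.Filter{{
			Name:   aws.String("replication-config-arn"),
			Values: []string{arn}, // plain string slice; no aws.StringSlice in v2
		}},
	}

	var configs []awstypes.ReplicationConfig
	pages := dms.NewDescribeReplicationConfigsPaginator(conn, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)

		// v2 errors are modelled types; errors.As is the stdlib equivalent
		// of the provider's internal errs.IsA generic used in the diff.
		var nf *awstypes.ResourceNotFoundFault
		if errors.As(err, &nf) {
			return nil, fmt.Errorf("replication config %s not found: %w", arn, err)
		}
		if err != nil {
			return nil, err
		}
		configs = append(configs, page.ReplicationConfigs...)
	}
	return configs, nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	configs, err := listReplicationConfigs(ctx, dms.NewFromConfig(cfg), "arn:aws:dms:example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d replication config(s)\n", len(configs))
}
```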
} return output, nil } -func findReplicationByReplicationConfigARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) (*dms.Replication, error) { +func findReplicationByReplicationConfigARN(ctx context.Context, conn *dms.Client, arn string) (*awstypes.Replication, error) { input := &dms.DescribeReplicationsInput{ - Filters: []*dms.Filter{{ + Filters: []awstypes.Filter{{ Name: aws.String("replication-config-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }}, } return findReplication(ctx, conn, input) } -func findReplication(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationsInput) (*dms.Replication, error) { +func findReplication(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationsInput) (*awstypes.Replication, error) { output, err := findReplications(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplications(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationsInput) ([]*dms.Replication, error) { - var output []*dms.Replication +func findReplications(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationsInput) ([]awstypes.Replication, error) { + var output []awstypes.Replication - err := conn.DescribeReplicationsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Replications { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Replications...) } return output, nil } -func statusReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) retry.StateRefreshFunc { +func statusReplication(ctx context.Context, conn *dms.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findReplicationByReplicationConfigARN(ctx, conn, arn) @@ -465,27 +455,24 @@ func statusReplication(ctx context.Context, conn *dms.DatabaseMigrationService, return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func setLastReplicationError(err error, replication *dms.Replication) { +func setLastReplicationError(err error, replication *awstypes.Replication) { var errs []error - errs = append(errs, tfslices.ApplyToAll(replication.FailureMessages, func(v *string) error { - if v := aws.StringValue(v); v != "" { - return errors.New(v) - } - return nil + errs = append(errs, tfslices.ApplyToAll(replication.FailureMessages, func(v string) error { + return errors.New(v) })...) 
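Several hunks in this file also swap the v1 pointer-dereference helpers (aws.StringValue, aws.Int64Value, aws.BoolValue) for the v2 aws.To* family, and []*string parameters for plain []string (as in the ApplyToAll closure over FailureMessages above). A small sketch of the two idioms; the struct below is a stand-in for demonstration, not a DMS type:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// example is a stand-in mirroring how v2 API types mix pointer scalars
// with value-typed slices (e.g. FailureMessages []string).
type example struct {
	Status           *string
	MaxCapacityUnits *int32
	FailureMessages  []string
}

func main() {
	e := example{
		Status:           aws.String("running"),
		MaxCapacityUnits: aws.Int32(4),
		FailureMessages:  []string{"table error", "lob truncated"},
	}

	// aws.ToString / aws.ToInt32 return the zero value for nil pointers,
	// replacing the v1 aws.StringValue / aws.Int64Value helpers.
	fmt.Println(aws.ToString(e.Status), aws.ToInt32(e.MaxCapacityUnits))

	// Slices are value-typed in v2, so no aws.StringValueSlice round-trip:
	for _, msg := range e.FailureMessages {
		fmt.Println("failure:", msg)
	}
}
```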
- if v := aws.StringValue(replication.StopReason); v != "" { + if v := aws.ToString(replication.StopReason); v != "" { errs = append(errs, errors.New(v)) } tfresource.SetLastError(err, errors.Join(errs...)) } -func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { +func waitReplicationRunning(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) (*awstypes.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ replicationStatusReady, @@ -506,7 +493,7 @@ func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.Replication); ok { + if output, ok := outputRaw.(*awstypes.Replication); ok { setLastReplicationError(err, output) return output, err } @@ -514,7 +501,7 @@ func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationServ return nil, err } -func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { +func waitReplicationStopped(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) (*awstypes.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationStatusStopping, replicationStatusRunning}, Target: []string{replicationStatusStopped}, @@ -526,7 +513,7 @@ func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.Replication); ok { + if output, ok := outputRaw.(*awstypes.Replication); ok { setLastReplicationError(err, output) return output, err } @@ -534,7 +521,7 @@ func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationServ return nil, err } -func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { +func waitReplicationDeleted(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) (*awstypes.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusDeleting, replicationStatusStopped}, Target: []string{}, @@ -546,7 +533,7 @@ func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.Replication); ok { + if output, ok := outputRaw.(*awstypes.Replication); ok { setLastReplicationError(err, output) return output, err } @@ -554,14 +541,14 @@ func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationServ return nil, err } -func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { +func startReplication(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) error { replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) if err != nil { return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) } - replicationStatus := aws.StringValue(replication.Status) + replicationStatus := aws.ToString(replication.Status) if replicationStatus == replicationStatusRunning { return nil } @@ -575,7 +562,7 @@ func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, a StartReplicationType: aws.String(startReplicationType), } - _, err = conn.StartReplicationWithContext(ctx, 
input) + _, err = conn.StartReplication(ctx, input) if err != nil { return fmt.Errorf("starting DMS Serverless Replication (%s): %w", arn, err) @@ -588,7 +575,7 @@ func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, a return nil } -func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { +func stopReplication(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) error { replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) if tfresource.NotFound(err) { @@ -599,8 +586,7 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) } - replicationStatus := aws.StringValue(replication.Status) - if replicationStatus == replicationStatusStopped || replicationStatus == replicationStatusCreated || replicationStatus == replicationStatusFailed { + if replicationStatus := aws.ToString(replication.Status); replicationStatus == replicationStatusStopped || replicationStatus == replicationStatusCreated || replicationStatus == replicationStatusFailed { return nil } @@ -608,7 +594,7 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar ReplicationConfigArn: aws.String(arn), } - _, err = conn.StopReplicationWithContext(ctx, input) + _, err = conn.StopReplication(ctx, input) if err != nil { return fmt.Errorf("stopping DMS Serverless Replication (%s): %w", arn, err) @@ -621,32 +607,32 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar return nil } -func flattenComputeConfig(apiObject *dms.ComputeConfig) []interface{} { +func flattenComputeConfig(apiObject *awstypes.ComputeConfig) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - names.AttrAvailabilityZone: aws.StringValue(apiObject.AvailabilityZone), - "dns_name_servers": aws.StringValue(apiObject.DnsNameServers), - names.AttrKMSKeyID: aws.StringValue(apiObject.KmsKeyId), - "max_capacity_units": aws.Int64Value(apiObject.MaxCapacityUnits), - "min_capacity_units": aws.Int64Value(apiObject.MinCapacityUnits), - "multi_az": aws.BoolValue(apiObject.MultiAZ), - names.AttrPreferredMaintenanceWindow: aws.StringValue(apiObject.PreferredMaintenanceWindow), - "replication_subnet_group_id": aws.StringValue(apiObject.ReplicationSubnetGroupId), - names.AttrVPCSecurityGroupIDs: flex.FlattenStringSet(apiObject.VpcSecurityGroupIds), + names.AttrAvailabilityZone: aws.ToString(apiObject.AvailabilityZone), + "dns_name_servers": aws.ToString(apiObject.DnsNameServers), + names.AttrKMSKeyID: aws.ToString(apiObject.KmsKeyId), + "max_capacity_units": aws.ToInt32(apiObject.MaxCapacityUnits), + "min_capacity_units": aws.ToInt32(apiObject.MinCapacityUnits), + "multi_az": aws.ToBool(apiObject.MultiAZ), + names.AttrPreferredMaintenanceWindow: aws.ToString(apiObject.PreferredMaintenanceWindow), + "replication_subnet_group_id": aws.ToString(apiObject.ReplicationSubnetGroupId), + names.AttrVPCSecurityGroupIDs: apiObject.VpcSecurityGroupIds, } return []interface{}{tfMap} } -func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { +func expandComputeConfigInput(tfMap map[string]interface{}) *awstypes.ComputeConfig { if tfMap == nil { return nil } - apiObject := &dms.ComputeConfig{} + apiObject := &awstypes.ComputeConfig{} if v, ok := tfMap[names.AttrAvailabilityZone].(string); ok && v != "" { apiObject.AvailabilityZone = aws.String(v) @@ 
-661,11 +647,11 @@ func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { } if v, ok := tfMap["max_capacity_units"].(int); ok && v != 0 { - apiObject.MaxCapacityUnits = aws.Int64(int64(v)) + apiObject.MaxCapacityUnits = aws.Int32(int32(v)) } if v, ok := tfMap["min_capacity_units"].(int); ok && v != 0 { - apiObject.MinCapacityUnits = aws.Int64(int64(v)) + apiObject.MinCapacityUnits = aws.Int32(int32(v)) } if v, ok := tfMap["multi_az"].(bool); ok { @@ -681,7 +667,7 @@ func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { } if v, ok := tfMap[names.AttrVPCSecurityGroupIDs].(*schema.Set); ok && v.Len() > 0 { - apiObject.VpcSecurityGroupIds = flex.ExpandStringSet(v) + apiObject.VpcSecurityGroupIds = flex.ExpandStringValueSet(v) } return apiObject diff --git a/internal/service/dms/replication_config_test.go b/internal/service/dms/replication_config_test.go index 2cfdd78bd26..92ca9c91869 100644 --- a/internal/service/dms/replication_config_test.go +++ b/internal/service/dms/replication_config_test.go @@ -10,12 +10,13 @@ import ( "testing" "github.com/YakDriver/regexache" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -24,12 +25,12 @@ import ( func TestAccDMSReplicationConfig_basic(t *testing.T) { t.Parallel() - for _, migrationType := range dms.MigrationTypeValue_Values() { //nolint:paralleltest // false positive + for _, migrationType := range enum.Values[awstypes.MigrationTypeValue]() { //nolint:paralleltest // false positive t.Run(migrationType, func(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -82,7 +83,7 @@ func TestAccDMSReplicationConfig_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -106,7 +107,7 @@ func TestAccDMSReplicationConfig_settings_EnableLogging(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -199,7 +200,7 @@ func TestAccDMSReplicationConfig_settings_LogComponents(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v 
awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -232,7 +233,7 @@ func TestAccDMSReplicationConfig_settings_StreamBuffer(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -263,7 +264,7 @@ func TestAccDMSReplicationConfig_tags(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -304,7 +305,7 @@ func TestAccDMSReplicationConfig_update(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -341,7 +342,7 @@ func TestAccDMSReplicationConfig_startReplication(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -373,14 +374,14 @@ func TestAccDMSReplicationConfig_startReplication(t *testing.T) { }) } -func testAccCheckReplicationConfigExists(ctx context.Context, n string, v *dms.ReplicationConfig) resource.TestCheckFunc { +func testAccCheckReplicationConfigExists(ctx context.Context, n string, v *awstypes.ReplicationConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) output, err := tfdms.FindReplicationConfigByARN(ctx, conn, rs.Primary.ID) @@ -401,7 +402,7 @@ func testAccCheckReplicationConfigDestroy(ctx context.Context) resource.TestChec continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationConfigByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/replication_instance.go b/internal/service/dms/replication_instance.go index 585022d167c..8c18532cf5c 100644 --- a/internal/service/dms/replication_instance.go +++ b/internal/service/dms/replication_instance.go @@ -8,14 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -27,7 +28,7 @@ import ( // @SDKResource("aws_dms_replication_instance", name="Replication Instance") // @Tags(identifierAttribute="replication_instance_arn") -func ResourceReplicationInstance() *schema.Resource { +func resourceReplicationInstance() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationInstanceCreate, ReadWithoutTimeout: resourceReplicationInstanceRead, @@ -153,7 +154,7 @@ func ResourceReplicationInstance() *schema.Resource { func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) replicationInstanceID := d.Get("replication_instance_id").(string) input := &dms.CreateReplicationInstanceInput{ @@ -170,7 +171,7 @@ func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceDa // to set the default value. See GitHub Issue #5694 https://github.com/hashicorp/terraform/issues/5694 if v, ok := d.GetOk(names.AttrAllocatedStorage); ok { - input.AllocatedStorage = aws.Int64(int64(v.(int))) + input.AllocatedStorage = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk(names.AttrAvailabilityZone); ok { input.AvailabilityZone = aws.String(v.(string)) @@ -191,10 +192,10 @@ func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceDa input.ReplicationSubnetGroupIdentifier = aws.String(v.(string)) } if v, ok := d.GetOk(names.AttrVPCSecurityGroupIDs); ok { - input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.VpcSecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.CreateReplicationInstanceWithContext(ctx, input) + _, err := conn.CreateReplicationInstance(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Instance (%s): %s", replicationInstanceID, err) @@ -211,9 +212,9 @@ func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceDa func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - instance, err := FindReplicationInstanceByID(ctx, conn, d.Id()) + instance, err := findReplicationInstanceByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Instance (%s) not found, removing from state", d.Id()) @@ -237,11 +238,11 @@ func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData d.Set("replication_instance_arn", instance.ReplicationInstanceArn) d.Set("replication_instance_class", instance.ReplicationInstanceClass) d.Set("replication_instance_id", instance.ReplicationInstanceIdentifier) - d.Set("replication_instance_private_ips", aws.StringValueSlice(instance.ReplicationInstancePrivateIpAddresses)) - d.Set("replication_instance_public_ips", aws.StringValueSlice(instance.ReplicationInstancePublicIpAddresses)) + d.Set("replication_instance_private_ips", instance.ReplicationInstancePrivateIpAddresses) + d.Set("replication_instance_public_ips", instance.ReplicationInstancePublicIpAddresses) d.Set("replication_subnet_group_id", 
instance.ReplicationSubnetGroup.ReplicationSubnetGroupIdentifier) - vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(sg *dms.VpcSecurityGroupMembership) string { - return aws.StringValue(sg.VpcSecurityGroupId) + vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(v awstypes.VpcSecurityGroupMembership) string { + return aws.ToString(v.VpcSecurityGroupId) }) d.Set(names.AttrVPCSecurityGroupIDs, vpcSecurityGroupIDs) @@ -250,19 +251,19 @@ func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll, names.AttrAllowMajorVersionUpgrade) { // Having allowing_major_version_upgrade by itself should not trigger ModifyReplicationInstance // as it results in InvalidParameterCombination: No modifications were requested input := &dms.ModifyReplicationInstanceInput{ - AllowMajorVersionUpgrade: aws.Bool(d.Get(names.AttrAllowMajorVersionUpgrade).(bool)), - ApplyImmediately: aws.Bool(d.Get(names.AttrApplyImmediately).(bool)), + AllowMajorVersionUpgrade: d.Get(names.AttrAllowMajorVersionUpgrade).(bool), + ApplyImmediately: d.Get(names.AttrApplyImmediately).(bool), ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), } if d.HasChange(names.AttrAllocatedStorage) { - input.AllocatedStorage = aws.Int64(int64(d.Get(names.AttrAllocatedStorage).(int))) + input.AllocatedStorage = aws.Int32(int32(d.Get(names.AttrAllocatedStorage).(int))) } if d.HasChange(names.AttrAutoMinorVersionUpgrade) { @@ -290,10 +291,10 @@ func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceDa } if d.HasChange(names.AttrVPCSecurityGroupIDs) { - input.VpcSecurityGroupIds = flex.ExpandStringSet(d.Get(names.AttrVPCSecurityGroupIDs).(*schema.Set)) + input.VpcSecurityGroupIds = flex.ExpandStringValueSet(d.Get(names.AttrVPCSecurityGroupIDs).(*schema.Set)) } - _, err := conn.ModifyReplicationInstanceWithContext(ctx, input) + _, err := conn.ModifyReplicationInstance(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Replication Instance (%s): %s", d.Id(), err) @@ -309,14 +310,14 @@ func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceDa func resourceReplicationInstanceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Replication Instance: %s", d.Id()) - _, err := conn.DeleteReplicationInstanceWithContext(ctx, &dms.DeleteReplicationInstanceInput{ + _, err := conn.DeleteReplicationInstance(ctx, &dms.DeleteReplicationInstanceInput{ ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -331,12 +332,12 @@ func resourceReplicationInstanceDelete(ctx context.Context, d *schema.ResourceDa return diags } -func FindReplicationInstanceByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationInstance, error) { +func findReplicationInstanceByID(ctx context.Context, conn *dms.Client, id string) 
(*awstypes.ReplicationInstance, error) { input := &dms.DescribeReplicationInstancesInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("replication-instance-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -344,50 +345,43 @@ func FindReplicationInstanceByID(ctx context.Context, conn *dms.DatabaseMigratio return findReplicationInstance(ctx, conn, input) } -func findReplicationInstance(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationInstancesInput) (*dms.ReplicationInstance, error) { +func findReplicationInstance(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationInstancesInput) (*awstypes.ReplicationInstance, error) { output, err := findReplicationInstances(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationInstances(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationInstancesInput) ([]*dms.ReplicationInstance, error) { - var output []*dms.ReplicationInstance +func findReplicationInstances(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationInstancesInput) ([]awstypes.ReplicationInstance, error) { + var output []awstypes.ReplicationInstance - err := conn.DescribeReplicationInstancesPagesWithContext(ctx, input, func(page *dms.DescribeReplicationInstancesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationInstancesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationInstances { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationInstances...) 
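The delete and finder hunks above drop tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) in favour of a typed check on *awstypes.ResourceNotFoundFault. A standalone sketch of that handling against the public v2 API; the "ignore not found on delete" semantics mirror what the resource delete in the diff does, and errors.As stands in for the provider's errs.IsA generic:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

// deleteReplicationInstance deletes by ARN and treats "already gone" as
// success, mirroring how the resource delete swallows ResourceNotFoundFault.
func deleteReplicationInstance(ctx context.Context, conn *dms.Client, arn string) error {
	_, err := conn.DeleteReplicationInstance(ctx, &dms.DeleteReplicationInstanceInput{
		ReplicationInstanceArn: aws.String(arn),
	})

	var nf *awstypes.ResourceNotFoundFault
	if errors.As(err, &nf) {
		// v1 compared string error codes; v2 exposes modelled fault types.
		return nil
	}
	return err
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := deleteReplicationInstance(ctx, dms.NewFromConfig(cfg), "arn:aws:dms:example"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted (or already absent)")
}
```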
} return output, nil } -func statusReplicationInstance(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc { +func statusReplicationInstance(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindReplicationInstanceByID(ctx, conn, id) + output, err := findReplicationInstanceByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -397,11 +391,11 @@ func statusReplicationInstance(ctx context.Context, conn *dms.DatabaseMigrationS return nil, "", err } - return output, aws.StringValue(output.ReplicationInstanceStatus), nil + return output, aws.ToString(output.ReplicationInstanceStatus), nil } } -func waitReplicationInstanceCreated(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationInstance, error) { +func waitReplicationInstanceCreated(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationInstance, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusCreating, replicationInstanceStatusModifying}, Target: []string{replicationInstanceStatusAvailable}, @@ -413,14 +407,14 @@ func waitReplicationInstanceCreated(ctx context.Context, conn *dms.DatabaseMigra outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationInstance); ok { + if output, ok := outputRaw.(*awstypes.ReplicationInstance); ok { return output, err } return nil, err } -func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationInstance, error) { +func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationInstance, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusModifying, replicationInstanceStatusUpgrading}, Target: []string{replicationInstanceStatusAvailable}, @@ -432,14 +426,14 @@ func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.DatabaseMigra outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationInstance); ok { + if output, ok := outputRaw.(*awstypes.ReplicationInstance); ok { return output, err } return nil, err } -func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationInstance, error) { +func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationInstance, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusDeleting}, Target: []string{}, @@ -451,7 +445,7 @@ func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.DatabaseMigra outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationInstance); ok { + if output, ok := outputRaw.(*awstypes.ReplicationInstance); ok { return output, err } diff --git a/internal/service/dms/replication_instance_data_source.go b/internal/service/dms/replication_instance_data_source.go index afa7b0056f6..bcd3a2075ff 100644 --- a/internal/service/dms/replication_instance_data_source.go +++ b/internal/service/dms/replication_instance_data_source.go @@ -6,8 +6,8 @@ package dms import ( "context" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + 
"github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_replication_instance") -func DataSourceReplicationInstance() *schema.Resource { +// @SDKDataSource("aws_dms_replication_instance", name="Replication Instance") +func dataSourceReplicationInstance() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReplicationInstanceRead, @@ -98,18 +98,18 @@ func DataSourceReplicationInstance() *schema.Resource { func dataSourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig rID := d.Get("replication_instance_id").(string) - instance, err := FindReplicationInstanceByID(ctx, conn, rID) + instance, err := findReplicationInstanceByID(ctx, conn, rID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Replication Instance (%s): %s", rID, err) } - d.SetId(aws.StringValue(instance.ReplicationInstanceIdentifier)) + d.SetId(aws.ToString(instance.ReplicationInstanceIdentifier)) d.Set(names.AttrAllocatedStorage, instance.AllocatedStorage) d.Set(names.AttrAutoMinorVersionUpgrade, instance.AutoMinorVersionUpgrade) d.Set(names.AttrAvailabilityZone, instance.AvailabilityZone) @@ -119,15 +119,15 @@ func dataSourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceDa d.Set("network_type", instance.NetworkType) d.Set(names.AttrPreferredMaintenanceWindow, instance.PreferredMaintenanceWindow) d.Set(names.AttrPubliclyAccessible, instance.PubliclyAccessible) - arn := aws.StringValue(instance.ReplicationInstanceArn) + arn := aws.ToString(instance.ReplicationInstanceArn) d.Set("replication_instance_arn", arn) d.Set("replication_instance_class", instance.ReplicationInstanceClass) d.Set("replication_instance_id", instance.ReplicationInstanceIdentifier) - d.Set("replication_instance_private_ips", aws.StringValueSlice(instance.ReplicationInstancePrivateIpAddresses)) - d.Set("replication_instance_public_ips", aws.StringValueSlice(instance.ReplicationInstancePublicIpAddresses)) + d.Set("replication_instance_private_ips", instance.ReplicationInstancePrivateIpAddresses) + d.Set("replication_instance_public_ips", instance.ReplicationInstancePublicIpAddresses) d.Set("replication_subnet_group_id", instance.ReplicationSubnetGroup.ReplicationSubnetGroupIdentifier) - vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(sg *dms.VpcSecurityGroupMembership) string { - return aws.StringValue(sg.VpcSecurityGroupId) + vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(sg awstypes.VpcSecurityGroupMembership) string { + return aws.ToString(sg.VpcSecurityGroupId) }) d.Set(names.AttrVPCSecurityGroupIDs, vpcSecurityGroupIDs) diff --git a/internal/service/dms/replication_instance_test.go b/internal/service/dms/replication_instance_test.go index bf95f735f98..c4a4d93dc11 100644 --- a/internal/service/dms/replication_instance_test.go +++ b/internal/service/dms/replication_instance_test.go @@ -48,7 +48,7 @@ func 
TestAccDMSReplicationInstance_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "replication_instance_class", replicationInstanceClass), resource.TestCheckResourceAttr(resourceName, "replication_instance_id", rName), resource.TestCheckResourceAttr(resourceName, "replication_instance_private_ips.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "replication_instance_public_ips.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_instance_public_ips.#", acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "replication_subnet_group_id"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "vpc_security_group_ids.#", acctest.Ct1), @@ -519,7 +519,7 @@ func testAccCheckReplicationInstanceExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationInstanceByID(ctx, conn, rs.Primary.ID) @@ -529,7 +529,7 @@ func testAccCheckReplicationInstanceExists(ctx context.Context, n string) resour func testAccCheckReplicationInstanceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dms_replication_instance" { diff --git a/internal/service/dms/replication_subnet_group.go b/internal/service/dms/replication_subnet_group.go index 193ada4df24..6094f6376e9 100644 --- a/internal/service/dms/replication_subnet_group.go +++ b/internal/service/dms/replication_subnet_group.go @@ -8,14 +8,15 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -27,7 +28,7 @@ import ( // @SDKResource("aws_dms_replication_subnet_group", name="Replication Subnet Group") // @Tags(identifierAttribute="replication_subnet_group_arn") -func ResourceReplicationSubnetGroup() *schema.Resource { +func resourceReplicationSubnetGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationSubnetGroupCreate, ReadWithoutTimeout: resourceReplicationSubnetGroupRead, @@ -73,20 +74,19 @@ func ResourceReplicationSubnetGroup() *schema.Resource { func resourceReplicationSubnetGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := 
meta.(*conns.AWSClient).DMSClient(ctx) replicationSubnetGroupID := d.Get("replication_subnet_group_id").(string) input := &dms.CreateReplicationSubnetGroupInput{ ReplicationSubnetGroupDescription: aws.String(d.Get("replication_subnet_group_description").(string)), ReplicationSubnetGroupIdentifier: aws.String(replicationSubnetGroupID), - SubnetIds: flex.ExpandStringSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), Tags: getTagsIn(ctx), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateReplicationSubnetGroupWithContext(ctx, input) - }, dms.ErrCodeAccessDeniedFault) - + _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, propagationTimeout, func() (interface{}, error) { + return conn.CreateReplicationSubnetGroup(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Subnet Group (%s): %s", replicationSubnetGroupID, err) } @@ -98,9 +98,9 @@ func resourceReplicationSubnetGroupCreate(ctx context.Context, d *schema.Resourc func resourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - group, err := FindReplicationSubnetGroupByID(ctx, conn, d.Id()) + group, err := findReplicationSubnetGroupByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Subnet Group (%s) not found, removing from state", d.Id()) @@ -124,8 +124,8 @@ func resourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceD d.Set("replication_subnet_group_arn", arn) d.Set("replication_subnet_group_description", group.ReplicationSubnetGroupDescription) d.Set("replication_subnet_group_id", group.ReplicationSubnetGroupIdentifier) - subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn *dms.Subnet) string { - return aws.StringValue(sn.SubnetIdentifier) + subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn awstypes.Subnet) string { + return aws.ToString(sn.SubnetIdentifier) }) d.Set(names.AttrSubnetIDs, subnetIDs) d.Set(names.AttrVPCID, group.VpcId) @@ -135,21 +135,21 @@ func resourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceD func resourceReplicationSubnetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { // Updates to subnet groups are only valid when sending SubnetIds even if there are no // changes to SubnetIds. 
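The create hunk above swaps tfresource.RetryWhenAWSErrCodeEquals for the generic tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault], retrying while IAM permissions propagate. Below is a simplified, self-contained stand-in for that retry-on-typed-fault idea; the loop, back-off, and timings are illustrative and not the provider helper's implementation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

// retryOnAccessDenied re-runs op while it fails with the modelled
// AccessDeniedFault, up to the given timeout. This only approximates the
// provider's generic RetryWhenIsA helper for demonstration purposes.
func retryOnAccessDenied(ctx context.Context, timeout time.Duration, op func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := op()

		var denied *awstypes.AccessDeniedFault
		if err == nil || !errors.As(err, &denied) || time.Now().After(deadline) {
			return err
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second): // fixed back-off, purely illustrative
		}
	}
}

func main() {
	attempts := 0
	err := retryOnAccessDenied(context.Background(), 30*time.Second, func() error {
		attempts++
		if attempts < 3 {
			return &awstypes.AccessDeniedFault{} // simulate IAM propagation delay
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```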
input := &dms.ModifyReplicationSubnetGroupInput{ ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), - SubnetIds: flex.ExpandStringSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), } if d.HasChange("replication_subnet_group_description") { input.ReplicationSubnetGroupDescription = aws.String(d.Get("replication_subnet_group_description").(string)) } - _, err := conn.ModifyReplicationSubnetGroupWithContext(ctx, input) + _, err := conn.ModifyReplicationSubnetGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Replication Subnet Group (%s): %s", d.Id(), err) @@ -161,14 +161,14 @@ func resourceReplicationSubnetGroupUpdate(ctx context.Context, d *schema.Resourc func resourceReplicationSubnetGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Replication Subnet Group: %s", d.Id()) - _, err := conn.DeleteReplicationSubnetGroupWithContext(ctx, &dms.DeleteReplicationSubnetGroupInput{ + _, err := conn.DeleteReplicationSubnetGroup(ctx, &dms.DeleteReplicationSubnetGroupInput{ ReplicationSubnetGroupIdentifier: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -179,12 +179,12 @@ func resourceReplicationSubnetGroupDelete(ctx context.Context, d *schema.Resourc return diags } -func FindReplicationSubnetGroupByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationSubnetGroup, error) { +func findReplicationSubnetGroupByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationSubnetGroup, error) { input := &dms.DescribeReplicationSubnetGroupsInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("replication-subnet-group-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -192,42 +192,35 @@ func FindReplicationSubnetGroupByID(ctx context.Context, conn *dms.DatabaseMigra return findReplicationSubnetGroup(ctx, conn, input) } -func findReplicationSubnetGroup(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationSubnetGroupsInput) (*dms.ReplicationSubnetGroup, error) { +func findReplicationSubnetGroup(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationSubnetGroupsInput) (*awstypes.ReplicationSubnetGroup, error) { output, err := findReplicationSubnetGroups(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationSubnetGroups(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationSubnetGroupsInput) ([]*dms.ReplicationSubnetGroup, error) { - var output []*dms.ReplicationSubnetGroup +func findReplicationSubnetGroups(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationSubnetGroupsInput) ([]awstypes.ReplicationSubnetGroup, error) { + var output []awstypes.ReplicationSubnetGroup - err := conn.DescribeReplicationSubnetGroupsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationSubnetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationSubnetGroupsPaginator(conn, input) + for 
pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationSubnetGroups { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationSubnetGroups...) } return output, nil diff --git a/internal/service/dms/replication_subnet_group_data_source.go b/internal/service/dms/replication_subnet_group_data_source.go index 3f4f27c60ae..435f386c411 100644 --- a/internal/service/dms/replication_subnet_group_data_source.go +++ b/internal/service/dms/replication_subnet_group_data_source.go @@ -7,9 +7,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,8 +19,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_replication_subnet_group") -func DataSourceReplicationSubnetGroup() *schema.Resource { +// @SDKDataSource("aws_dms_replication_subnet_group", name="Replication Subnet Group") +func dataSourceReplicationSubnetGroup() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReplicationSubnetGroupRead, @@ -57,18 +57,18 @@ func DataSourceReplicationSubnetGroup() *schema.Resource { func dataSourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig replicationSubnetGroupID := d.Get("replication_subnet_group_id").(string) - group, err := FindReplicationSubnetGroupByID(ctx, conn, replicationSubnetGroupID) + group, err := findReplicationSubnetGroupByID(ctx, conn, replicationSubnetGroupID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Replication Subnet Group (%s): %s", replicationSubnetGroupID, err) } - d.SetId(aws.StringValue(group.ReplicationSubnetGroupIdentifier)) + d.SetId(aws.ToString(group.ReplicationSubnetGroupIdentifier)) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, Service: "dms", @@ -79,8 +79,8 @@ func dataSourceReplicationSubnetGroupRead(ctx context.Context, d *schema.Resourc d.Set("replication_subnet_group_arn", arn) d.Set("replication_subnet_group_description", group.ReplicationSubnetGroupDescription) d.Set("replication_subnet_group_id", group.ReplicationSubnetGroupIdentifier) - subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn *dms.Subnet) string { - return aws.StringValue(sn.SubnetIdentifier) + subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn awstypes.Subnet) string { + return aws.ToString(sn.SubnetIdentifier) }) d.Set(names.AttrSubnetIDs, subnetIDs) d.Set(names.AttrVPCID, group.VpcId) diff --git 
a/internal/service/dms/replication_subnet_group_test.go b/internal/service/dms/replication_subnet_group_test.go index acd4d5ad4dc..ad41eed6cc8 100644 --- a/internal/service/dms/replication_subnet_group_test.go +++ b/internal/service/dms/replication_subnet_group_test.go @@ -132,7 +132,7 @@ func testAccCheckReplicationSubnetGroupExists(ctx context.Context, n string) res return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationSubnetGroupByID(ctx, conn, rs.Primary.ID) @@ -142,7 +142,7 @@ func testAccCheckReplicationSubnetGroupExists(ctx context.Context, n string) res func testAccCheckReplicationSubnetGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dms_replication_subnet_group" { diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 7375976c24b..6e42a43a161 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -11,14 +11,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -29,7 +31,7 @@ import ( // @SDKResource("aws_dms_replication_task", name="Replication Task") // @Tags(identifierAttribute="replication_task_arn") -func ResourceReplicationTask() *schema.Resource { +func resourceReplicationTask() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationTaskCreate, ReadWithoutTimeout: resourceReplicationTaskRead, @@ -54,9 +56,9 @@ func ResourceReplicationTask() *schema.Resource { ConflictsWith: []string{"cdc_start_position"}, }, "migration_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.MigrationTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.MigrationTypeValue](), }, "replication_instance_arn": { Type: schema.TypeString, @@ -134,11 +136,11 @@ func ResourceReplicationTask() *schema.Resource { func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) taskID := d.Get("replication_task_id").(string) input := 
&dms.CreateReplicationTaskInput{ - MigrationType: aws.String(d.Get("migration_type").(string)), + MigrationType: awstypes.MigrationTypeValue(d.Get("migration_type").(string)), ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), ReplicationTaskIdentifier: aws.String(taskID), SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), @@ -168,7 +170,7 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, input.ResourceIdentifier = aws.String(v.(string)) } - _, err := conn.CreateReplicationTaskWithContext(ctx, input) + _, err := conn.CreateReplicationTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Task (%s): %s", taskID, err) @@ -191,9 +193,9 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, func resourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - task, err := FindReplicationTaskByID(ctx, conn, d.Id()) + task, err := findReplicationTaskByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Task (%s) not found, removing from state", d.Id()) @@ -221,7 +223,7 @@ func resourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, me func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll, "replication_instance_arn", "start_replication_task") { if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { @@ -229,7 +231,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } input := &dms.ModifyReplicationTaskInput{ - MigrationType: aws.String(d.Get("migration_type").(string)), + MigrationType: awstypes.MigrationTypeValue(d.Get("migration_type").(string)), ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), TableMappings: aws.String(d.Get("table_mappings").(string)), } @@ -259,7 +261,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } } - _, err := conn.ModifyReplicationTaskWithContext(ctx, input) + _, err := conn.ModifyReplicationTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Task (%s): %s", d.Id(), err) @@ -286,7 +288,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, TargetReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), } - _, err := conn.MoveReplicationTaskWithContext(ctx, input) + _, err := conn.MoveReplicationTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "moving DMS Replication Task (%s): %s", d.Id(), err) @@ -304,7 +306,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChanges("start_replication_task") { - var f func(context.Context, *dms.DatabaseMigrationService, string) error + var f func(context.Context, *dms.Client, string) error if d.Get("start_replication_task").(bool) { f = startReplicationTask } else { @@ -320,18 +322,18 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting DMS Replication Task: %s", d.Id()) - _, err := conn.DeleteReplicationTaskWithContext(ctx, &dms.DeleteReplicationTaskInput{ + _, err := conn.DeleteReplicationTask(ctx, &dms.DeleteReplicationTaskInput{ ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -346,12 +348,12 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, return diags } -func FindReplicationTaskByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func findReplicationTaskByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { input := &dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("replication-task-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -359,50 +361,43 @@ func FindReplicationTaskByID(ctx context.Context, conn *dms.DatabaseMigrationSer return findReplicationTask(ctx, conn, input) } -func findReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) (*dms.ReplicationTask, error) { +func findReplicationTask(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationTasksInput) (*awstypes.ReplicationTask, error) { output, err := findReplicationTasks(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) ([]*dms.ReplicationTask, error) { - var output []*dms.ReplicationTask +func findReplicationTasks(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationTasksInput) ([]awstypes.ReplicationTask, error) { + var output []awstypes.ReplicationTask - err := conn.DescribeReplicationTasksPagesWithContext(ctx, input, func(page *dms.DescribeReplicationTasksOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationTasksPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationTasks { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationTasks...) 
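replication_task.go, like replication_config.go earlier in this diff, now validates migration_type with enum.Validate[awstypes.MigrationTypeValue]() and assigns the enum straight into the input struct instead of wrapping it in aws.String. A public-API sketch of where those values come from: the v2 SDK generates a Values() method on each enum type; the validation closure below is a hand-rolled stand-in for the provider's enum.Validate helper:

```go
package main

import (
	"fmt"

	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
)

// validMigrationType reports whether s is one of the modelled enum values;
// the provider's enum.Validate wraps the same Values() call in a
// schema validation function.
func validMigrationType(s string) bool {
	for _, v := range awstypes.MigrationTypeValue("").Values() {
		if string(v) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validMigrationType("full-load"))   // true
	fmt.Println(validMigrationType("not-a-type"))  // false

	// Enums are plain string types in v2, so they go into inputs directly:
	input := &dms.CreateReplicationTaskInput{
		MigrationType: awstypes.MigrationTypeValue("full-load-and-cdc"),
	}
	fmt.Println(input.MigrationType)
}
```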
} return output, nil } -func statusReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc { +func statusReplicationTask(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindReplicationTaskByID(ctx, conn, id) + output, err := findReplicationTaskByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -412,24 +407,24 @@ func statusReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServi return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func setLastReplicationTaskError(err error, replication *dms.ReplicationTask) { +func setLastReplicationTaskError(err error, replication *awstypes.ReplicationTask) { var errs []error - if v := aws.StringValue(replication.LastFailureMessage); v != "" { + if v := aws.ToString(replication.LastFailureMessage); v != "" { errs = append(errs, errors.New(v)) } - if v := aws.StringValue(replication.StopReason); v != "" { + if v := aws.ToString(replication.StopReason); v != "" { errs = append(errs, errors.New(v)) } tfresource.SetLastError(err, errors.Join(errs...)) } -func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskDeleted(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusDeleting}, Target: []string{}, @@ -441,7 +436,7 @@ func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigration outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -449,7 +444,7 @@ func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigration return nil, err } -func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskModified(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusModifying}, Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusFailed}, @@ -461,7 +456,7 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -469,7 +464,7 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio return nil, err } -func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskMoved(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusModifying, replicationTaskStatusMoving}, Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, 
replicationTaskStatusFailed}, @@ -481,7 +476,7 @@ func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationSe outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -489,7 +484,7 @@ func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationSe return nil, err } -func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskReady(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusCreating}, Target: []string{replicationTaskStatusReady}, @@ -501,7 +496,7 @@ func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationSe outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -509,7 +504,7 @@ func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationSe return nil, err } -func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func waitReplicationTaskRunning(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { const ( timeout = 5 * time.Minute ) @@ -524,7 +519,7 @@ func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigration outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -532,7 +527,7 @@ func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigration return nil, err } -func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func waitReplicationTaskStopped(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { const ( timeout = 5 * time.Minute ) @@ -548,7 +543,7 @@ func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigration outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -556,7 +551,7 @@ func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigration return nil, err } -func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func waitReplicationTaskSteady(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { const ( timeout = 5 * time.Minute ) @@ -572,7 +567,7 @@ func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationS outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -580,28 +575,28 @@ func waitReplicationTaskSteady(ctx 
context.Context, conn *dms.DatabaseMigrationS return nil, err } -func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { - task, err := FindReplicationTaskByID(ctx, conn, id) +func startReplicationTask(ctx context.Context, conn *dms.Client, id string) error { + task, err := findReplicationTaskByID(ctx, conn, id) if err != nil { return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) } - taskStatus := aws.StringValue(task.Status) + taskStatus := aws.ToString(task.Status) if taskStatus == replicationTaskStatusRunning { return nil } - startReplicationTaskType := dms.StartReplicationTaskTypeValueStartReplication + startReplicationTaskType := awstypes.StartReplicationTaskTypeValueStartReplication if taskStatus != replicationTaskStatusReady { - startReplicationTaskType = dms.StartReplicationTaskTypeValueResumeProcessing + startReplicationTaskType = awstypes.StartReplicationTaskTypeValueResumeProcessing } input := &dms.StartReplicationTaskInput{ ReplicationTaskArn: task.ReplicationTaskArn, - StartReplicationTaskType: aws.String(startReplicationTaskType), + StartReplicationTaskType: startReplicationTaskType, } - _, err = conn.StartReplicationTaskWithContext(ctx, input) + _, err = conn.StartReplicationTask(ctx, input) if err != nil { return fmt.Errorf("starting DMS Replication Task (%s): %w", id, err) @@ -614,8 +609,8 @@ func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServic return nil } -func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { - task, err := FindReplicationTaskByID(ctx, conn, id) +func stopReplicationTask(ctx context.Context, conn *dms.Client, id string) error { + task, err := findReplicationTaskByID(ctx, conn, id) if tfresource.NotFound(err) { return nil @@ -625,7 +620,7 @@ func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) } - taskStatus := aws.StringValue(task.Status) + taskStatus := aws.ToString(task.Status) if taskStatus != replicationTaskStatusRunning { return nil } @@ -634,9 +629,9 @@ func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService ReplicationTaskArn: task.ReplicationTaskArn, } - _, err = conn.StopReplicationTaskWithContext(ctx, input) + _, err = conn.StopReplicationTask(ctx, input) - if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "is currently not running") { + if errs.IsAErrorMessageContains[*awstypes.InvalidResourceStateFault](err, "is currently not running") { return nil } diff --git a/internal/service/dms/replication_task_data_source.go b/internal/service/dms/replication_task_data_source.go index 57faa6b7e3c..6c8db2e6e0a 100644 --- a/internal/service/dms/replication_task_data_source.go +++ b/internal/service/dms/replication_task_data_source.go @@ -6,7 +6,7 @@ package dms import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -15,8 +15,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_replication_task") -func DataSourceReplicationTask() *schema.Resource { +// @SDKDataSource("aws_dms_replication_task", name="Replication Task") +func dataSourceReplicationTask() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: 
dataSourceReplicationTaskRead, @@ -77,19 +77,18 @@ func DataSourceReplicationTask() *schema.Resource { func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig taskID := d.Get("replication_task_id").(string) - - task, err := FindReplicationTaskByID(ctx, conn, taskID) + task, err := findReplicationTaskByID(ctx, conn, taskID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Replication Task (%s): %s", taskID, err) } - d.SetId(aws.StringValue(task.ReplicationTaskIdentifier)) + d.SetId(aws.ToString(task.ReplicationTaskIdentifier)) d.Set("cdc_start_position", task.CdcStartPosition) d.Set("migration_type", task.MigrationType) d.Set("replication_instance_arn", task.ReplicationInstanceArn) @@ -101,7 +100,7 @@ func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, d.Set("table_mappings", task.TableMappings) d.Set("target_endpoint_arn", task.TargetEndpointArn) - tags, err := listTags(ctx, conn, aws.StringValue(task.ReplicationTaskArn)) + tags, err := listTags(ctx, conn, aws.ToString(task.ReplicationTaskArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing DMS Replication Task (%s) tags: %s", d.Id(), err) diff --git a/internal/service/dms/replication_task_data_source_test.go b/internal/service/dms/replication_task_data_source_test.go index 06ca0f53db2..5f738c76c6d 100644 --- a/internal/service/dms/replication_task_data_source_test.go +++ b/internal/service/dms/replication_task_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -19,7 +19,7 @@ func TestAccDMSReplicationTaskDataSource_basic(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" dataSourceName := "data.aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index bfd34cdbe32..77a881ceaad 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -13,15 +13,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/enum" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -30,12 +31,12 @@ import ( func TestAccDMSReplicationTask_basic(t *testing.T) { t.Parallel() - for _, migrationType := range dms.MigrationTypeValue_Values() { //nolint:paralleltest // false positive + for _, migrationType := range enum.Values[awstypes.MigrationTypeValue]() { //nolint:paralleltest // false positive t.Run(migrationType, func(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -79,7 +80,7 @@ func TestAccDMSReplicationTask_updateSettingsAndMappings(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -185,7 +186,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -202,7 +203,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.LogComponents[?Id=='DATA_STRUCTURE'].Severity | [0]", "LOGGER_SEVERITY_DEFAULT"), acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.CloudWatchLogGroup", fmt.Sprintf("dms-tasks-%s", rName)), func(s *terraform.State) error { - arn, err := arn.Parse(aws.StringValue(v.ReplicationTaskArn)) + arn, err := arn.Parse(aws.ToString(v.ReplicationTaskArn)) if err != nil { return err } @@ -230,7 +231,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.LogComponents[?Id=='DATA_STRUCTURE'].Severity | [0]", "LOGGER_SEVERITY_DEFAULT"), acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.CloudWatchLogGroup", fmt.Sprintf("dms-tasks-%s", rName)), func(s *terraform.State) error { - arn, err := arn.Parse(aws.StringValue(v.ReplicationTaskArn)) + arn, err := arn.Parse(aws.ToString(v.ReplicationTaskArn)) if err != nil { return err } @@ -263,7 +264,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.LogComponents[?Id=='DATA_STRUCTURE'].Severity | [0]", "LOGGER_SEVERITY_DEFAULT"), acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.CloudWatchLogGroup", fmt.Sprintf("dms-tasks-%s", rName)), func(s *terraform.State) error { - arn, err := arn.Parse(aws.StringValue(v.ReplicationTaskArn)) + arn, err := arn.Parse(aws.ToString(v.ReplicationTaskArn)) if err != nil { return err } @@ -321,7 +322,7 @@ func TestAccDMSReplicationTask_settings_LogComponents(t *testing.T) { ctx := acctest.Context(t) rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -354,7 +355,7 @@ func TestAccDMSReplicationTask_settings_StreamBuffer(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -385,7 +386,7 @@ func TestAccDMSReplicationTask_cdcStartPosition(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -415,7 +416,7 @@ func TestAccDMSReplicationTask_resourceIdentifier(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -448,7 +449,7 @@ func TestAccDMSReplicationTask_startReplicationTask(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -491,7 +492,7 @@ func TestAccDMSReplicationTask_s3ToRDS(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask //https://github.com/hashicorp/terraform-provider-aws/issues/28277 @@ -521,7 +522,7 @@ func TestAccDMSReplicationTask_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -545,7 +546,7 @@ func TestAccDMSReplicationTask_cdcStartTime_rfc3339_date(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask currentTime := time.Now().UTC() rfc3339Time := currentTime.Format(time.RFC3339) @@ -578,7 +579,7 @@ func TestAccDMSReplicationTask_cdcStartTime_unix_timestamp(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask currentTime := time.Now().UTC() rfc3339Time := currentTime.Format(time.RFC3339) @@ -615,7 +616,7 @@ func TestAccDMSReplicationTask_move(t *testing.T) { resourceName := "aws_dms_replication_task.test" instanceOne := "aws_dms_replication_instance.test" instanceTwo := "aws_dms_replication_instance.test2" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -645,14 
+646,14 @@ func TestAccDMSReplicationTask_move(t *testing.T) { }) } -func testAccCheckReplicationTaskExists(ctx context.Context, n string, v *dms.ReplicationTask) resource.TestCheckFunc { +func testAccCheckReplicationTaskExists(ctx context.Context, n string, v *awstypes.ReplicationTask) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) output, err := tfdms.FindReplicationTaskByID(ctx, conn, rs.Primary.ID) @@ -673,7 +674,7 @@ func testAccCheckReplicationTaskDestroy(ctx context.Context) resource.TestCheckF continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationTaskByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/s3_endpoint.go b/internal/service/dms/s3_endpoint.go index 90833c20427..5357182061e 100644 --- a/internal/service/dms/s3_endpoint.go +++ b/internal/service/dms/s3_endpoint.go @@ -5,22 +5,22 @@ package dms import ( "context" - "errors" "fmt" "log" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -29,7 +29,7 @@ import ( // @SDKResource("aws_dms_s3_endpoint", name="S3 Endpoint") // @Tags(identifierAttribute="endpoint_arn") -func ResourceS3Endpoint() *schema.Resource { +func resourceS3Endpoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceS3EndpointCreate, ReadWithoutTimeout: resourceS3EndpointRead, @@ -63,9 +63,9 @@ func ResourceS3Endpoint() *schema.Resource { ValidateFunc: validEndpointID, }, names.AttrEndpointType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.ReplicationEndpointTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ReplicationEndpointTypeValue](), }, "engine_display_name": { Type: schema.TypeString, @@ -83,10 +83,10 @@ func ResourceS3Endpoint() *schema.Resource { ValidateFunc: verify.ValidARN, }, "ssl_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DmsSslModeValue_Values(), false), + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateDiagFunc: 
enum.Validate[awstypes.DmsSslModeValue](), }, names.AttrStatus: { Type: schema.TypeString, @@ -114,9 +114,9 @@ func ResourceS3Endpoint() *schema.Resource { Required: true, }, "canned_acl_for_objects": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.CannedAclForObjectsValue_Values(), true), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.CannedAclForObjectsValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -146,10 +146,10 @@ func ResourceS3Endpoint() *schema.Resource { Optional: true, }, "compression_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.CompressionTypeValue_Values(), true), - Default: strings.ToUpper(dms.CompressionTypeValueNone), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.CompressionTypeValue](), + Default: strings.ToUpper(string(awstypes.CompressionTypeValueNone)), StateFunc: func(v interface{}) string { return strings.ToUpper(v.(string)) }, @@ -173,9 +173,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: "\\n", }, "data_format": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DataFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DataFormatValue](), }, "data_page_size": { Type: schema.TypeInt, @@ -183,9 +183,9 @@ func ResourceS3Endpoint() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "date_partition_delimiter": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DatePartitionDelimiterValue_Values(), true), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionDelimiterValue](), StateFunc: func(v interface{}) string { return strings.ToUpper(v.(string)) }, @@ -196,9 +196,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: false, }, "date_partition_sequence": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DatePartitionSequenceValue_Values(), true), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionSequenceValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -222,9 +222,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: true, }, "encoding_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.EncodingTypeValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.EncodingTypeValue](), }, "encryption_mode": { Type: schema.TypeString, @@ -272,9 +272,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: false, }, "parquet_version": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.ParquetVersionValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ParquetVersionValue](), }, "preserve_transactions": { Type: schema.TypeBool, @@ -321,17 +321,14 @@ func ResourceS3Endpoint() *schema.Resource { } } -const ( - ResNameS3Endpoint = "S3 Endpoint" -) - func resourceS3EndpointCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := 
meta.(*conns.AWSClient).DMSClient(ctx) + endpointID := d.Get("endpoint_id").(string) input := &dms.CreateEndpointInput{ - EndpointIdentifier: aws.String(d.Get("endpoint_id").(string)), - EndpointType: aws.String(d.Get(names.AttrEndpointType).(string)), + EndpointIdentifier: aws.String(endpointID), + EndpointType: awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)), EngineName: aws.String("s3"), Tags: getTagsIn(ctx), } @@ -344,46 +341,28 @@ func resourceS3EndpointCreate(ctx context.Context, d *schema.ResourceData, meta input.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("ssl_mode"); ok { - input.SslMode = aws.String(v.(string)) - } - if v, ok := d.GetOk("service_access_role_arn"); ok { input.ServiceAccessRoleArn = aws.String(v.(string)) } - input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == dms.ReplicationEndpointTypeValueTarget) - - input.ExtraConnectionAttributes = extraConnectionAnomalies(d) - - log.Println("[DEBUG] DMS create endpoint:", input) - - var out *dms.CreateEndpointOutput - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { - var err error - out, err = conn.CreateEndpointWithContext(ctx, input) + if v, ok := d.GetOk("ssl_mode"); ok { + input.SslMode = awstypes.DmsSslModeValue(v.(string)) + } - if tfawserr.ErrCodeEquals(err, "AccessDeniedFault") { - return retry.RetryableError(err) - } + input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == string(awstypes.ReplicationEndpointTypeValueTarget)) - if err != nil { - return retry.NonRetryableError(err) - } + input.ExtraConnectionAttributes = extraConnectionAnomalies(d) - return nil + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { + return conn.CreateEndpoint(ctx, input) }) - if tfresource.TimedOut(err) { - out, err = conn.CreateEndpointWithContext(ctx, input) - } - - if err != nil || out == nil || out.Endpoint == nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionCreating, ResNameS3Endpoint, d.Get("endpoint_id").(string), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating DMS S3 Endpoint (%s): %s", endpointID, err) } - d.SetId(d.Get("endpoint_id").(string)) - d.Set("endpoint_arn", out.Endpoint.EndpointArn) + d.SetId(endpointID) + d.Set("endpoint_arn", outputRaw.(*dms.CreateEndpointOutput).Endpoint.EndpointArn) // AWS bug? 
ssekki is ignored on create but sets on update if _, ok := d.GetOk("server_side_encryption_kms_key_id"); ok { @@ -395,9 +374,9 @@ func resourceS3EndpointCreate(ctx context.Context, d *schema.ResourceData, meta func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - endpoint, err := FindEndpointByID(ctx, conn, d.Id()) + endpoint, err := findEndpointByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Endpoint (%s) not found, removing from state", d.Id()) @@ -405,19 +384,18 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in return diags } - if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, ResNameS3Endpoint, d.Id(), err) + if err == nil && endpoint.S3Settings == nil { + err = tfresource.NewEmptyResultError(nil) } - if endpoint.S3Settings == nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, ResNameS3Endpoint, d.Id(), errors.New("no settings returned")) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading DMS S3 Endpoint (%s): %s", d.Id(), err) } d.Set("endpoint_arn", endpoint.EndpointArn) - d.Set(names.AttrCertificateARN, endpoint.CertificateArn) d.Set("endpoint_id", endpoint.EndpointIdentifier) - d.Set(names.AttrEndpointType, strings.ToLower(*endpoint.EndpointType)) // For some reason the AWS API only accepts lowercase type but returns it as uppercase + d.Set(names.AttrEndpointType, strings.ToLower(string(endpoint.EndpointType))) // For some reason the AWS API only accepts lowercase type but returns it as uppercase d.Set("engine_display_name", endpoint.EngineDisplayName) d.Set(names.AttrExternalID, endpoint.ExternalId) // d.Set("external_table_definition", endpoint.ExternalTableDefinition) // set from s3 settings @@ -426,7 +404,7 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("ssl_mode", endpoint.SslMode) d.Set(names.AttrStatus, endpoint.Status) - setDetachTargetOnLobLookupFailureParquet(d, aws.StringValue(endpoint.ExtraConnectionAttributes)) + setDetachTargetOnLobLookupFailureParquet(d, aws.ToString(endpoint.ExtraConnectionAttributes)) s3settings := endpoint.S3Settings d.Set("add_column_name", s3settings.AddColumnName) @@ -455,12 +433,12 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("timestamp_column_name", s3settings.TimestampColumnName) d.Set("use_task_start_time_for_full_load_timestamp", s3settings.UseTaskStartTimeForFullLoadTimestamp) - if d.Get(names.AttrEndpointType).(string) == dms.ReplicationEndpointTypeValueTarget { + if d.Get(names.AttrEndpointType).(string) == string(awstypes.ReplicationEndpointTypeValueTarget) { d.Set("add_trailing_padding_character", s3settings.AddTrailingPaddingCharacter) d.Set("compression_type", s3settings.CompressionType) d.Set("csv_no_sup_value", s3settings.CsvNoSupValue) d.Set("data_format", s3settings.DataFormat) - d.Set("date_partition_delimiter", strings.ToUpper(aws.StringValue(s3settings.DatePartitionDelimiter))) + d.Set("date_partition_delimiter", strings.ToUpper(string(s3settings.DatePartitionDelimiter))) d.Set("date_partition_enabled", s3settings.DatePartitionEnabled) d.Set("date_partition_sequence", s3settings.DatePartitionSequence) d.Set("date_partition_timezone", s3settings.DatePartitionTimezone) @@ -473,9 +451,9 @@ func
resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("use_csv_no_sup_value", s3settings.UseCsvNoSupValue) } - p, err := structure.NormalizeJsonString(aws.StringValue(s3settings.ExternalTableDefinition)) + p, err := structure.NormalizeJsonString(aws.ToString(s3settings.ExternalTableDefinition)) if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionSetting, ResNameS3Endpoint, d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } d.Set("external_table_definition", p) @@ -485,7 +463,7 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &dms.ModifyEndpointInput{ @@ -497,13 +475,13 @@ func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange(names.AttrEndpointType) { - input.EndpointType = aws.String(d.Get(names.AttrEndpointType).(string)) + input.EndpointType = awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)) } input.EngineName = aws.String(engineNameS3) if d.HasChange("ssl_mode") { - input.SslMode = aws.String(d.Get("ssl_mode").(string)) + input.SslMode = awstypes.DmsSslModeValue(d.Get("ssl_mode").(string)) } if d.HasChangesExcept( @@ -511,34 +489,18 @@ func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta names.AttrEndpointType, "ssl_mode", ) { - input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == dms.ReplicationEndpointTypeValueTarget) + input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == string(awstypes.ReplicationEndpointTypeValueTarget)) input.ServiceAccessRoleArn = aws.String(d.Get("service_access_role_arn").(string)) input.ExtraConnectionAttributes = extraConnectionAnomalies(d) } - log.Println("[DEBUG] DMS update endpoint:", input) - - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { - _, err := conn.ModifyEndpointWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, "AccessDeniedFault") { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil + _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.ModifyEndpoint(ctx, input) }) - if tfresource.TimedOut(err) { - _, err = conn.ModifyEndpointWithContext(ctx, input) - } - if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionUpdating, ResNameS3Endpoint, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating DMS S3 Endpoint (%s): %s", d.Id(), err) } } @@ -547,30 +509,30 @@ func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceS3EndpointDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Endpoint: (%s)", d.Id()) - _, err := conn.DeleteEndpointWithContext(ctx, &dms.DeleteEndpointInput{ + _, err := conn.DeleteEndpoint(ctx, &dms.DeleteEndpointInput{ EndpointArn: aws.String(d.Get("endpoint_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, 
dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionDeleting, ResNameS3Endpoint, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting DMS S3 Endpoint (%s): %s", d.Id(), err) } - if err = waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionWaitingForDeletion, ResNameS3Endpoint, d.Id(), err) + if _, err := waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS S3 Endpoint (%s) delete: %s", d.Id(), err) } return diags } -func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { - s3s := &dms.S3Settings{} +func s3Settings(d *schema.ResourceData, target bool) *awstypes.S3Settings { + s3s := &awstypes.S3Settings{} if v, ok := d.Get("add_column_name").(bool); ok { // likely only useful for target s3s.AddColumnName = aws.Bool(v) @@ -589,7 +551,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("canned_acl_for_objects"); ok { // likely only useful for target - s3s.CannedAclForObjects = aws.String(v.(string)) + s3s.CannedAclForObjects = awstypes.CannedAclForObjectsValue(v.(string)) } if v, ok := d.Get("cdc_inserts_and_updates").(bool); ok { // likely only useful for target @@ -601,11 +563,11 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("cdc_max_batch_interval"); ok { // likely only useful for target - s3s.CdcMaxBatchInterval = aws.Int64(int64(v.(int))) + s3s.CdcMaxBatchInterval = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("cdc_min_file_size"); ok { // likely only useful for target - s3s.CdcMinFileSize = aws.Int64(int64(v.(int))) + s3s.CdcMinFileSize = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("cdc_path"); ok { @@ -613,7 +575,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("compression_type"); ok && target { // likely only useful for target - s3s.CompressionType = aws.String(v.(string)) + s3s.CompressionType = awstypes.CompressionTypeValue(v.(string)) } if v, ok := d.GetOk("csv_delimiter"); ok { @@ -633,15 +595,15 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("data_format"); ok && target { // target - s3s.DataFormat = aws.String(v.(string)) + s3s.DataFormat = awstypes.DataFormatValue(v.(string)) } if v, ok := d.GetOk("data_page_size"); ok { // likely only useful for target - s3s.DataPageSize = aws.Int64(int64(v.(int))) + s3s.DataPageSize = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("date_partition_delimiter"); ok && target { // target - s3s.DatePartitionDelimiter = aws.String(v.(string)) + s3s.DatePartitionDelimiter = awstypes.DatePartitionDelimiterValue(v.(string)) } if v, ok := d.Get("date_partition_enabled").(bool); ok && target { // likely only useful for target @@ -649,7 +611,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("date_partition_sequence"); ok && target { // target - s3s.DatePartitionSequence = aws.String(v.(string)) + s3s.DatePartitionSequence = awstypes.DatePartitionSequenceValue(v.(string)) } if v, ok := d.GetOk("date_partition_timezone"); ok && target { // target @@ -657,7 +619,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok :=
d.GetOk("dict_page_size_limit"); ok { // likely only useful for target - s3s.DictPageSizeLimit = aws.Int64(int64(v.(int))) + s3s.DictPageSizeLimit = aws.Int32(int32(v.(int))) } if v, ok := d.Get("enable_statistics").(bool); ok { // likely only useful for target @@ -665,11 +627,11 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("encoding_type"); ok { // likely only useful for target - s3s.EncodingType = aws.String(v.(string)) + s3s.EncodingType = awstypes.EncodingTypeValue(v.(string)) } if v, ok := d.GetOk("encryption_mode"); ok && target { // target - s3s.EncryptionMode = aws.String(v.(string)) + s3s.EncryptionMode = awstypes.EncryptionModeValue(v.(string)) } if v, ok := d.GetOk(names.AttrExpectedBucketOwner); ok { // likely only useful for target @@ -685,7 +647,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("ignore_header_rows"); ok { - s3s.IgnoreHeaderRows = aws.Int64(int64(v.(int))) + s3s.IgnoreHeaderRows = aws.Int32(int32(v.(int))) } if v, ok := d.Get("include_op_for_full_load").(bool); ok { // likely only useful for target @@ -693,7 +655,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("max_file_size"); ok { // likely only useful for target - s3s.MaxFileSize = aws.Int64(int64(v.(int))) + s3s.MaxFileSize = aws.Int32(int32(v.(int))) } if v, ok := d.Get("parquet_timestamp_in_millisecond").(bool); ok && target { // target @@ -701,7 +663,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("parquet_version"); ok && target { // target - s3s.ParquetVersion = aws.String(v.(string)) + s3s.ParquetVersion = awstypes.ParquetVersionValue(v.(string)) } if v, ok := d.Get("preserve_transactions").(bool); ok && target { // target @@ -713,7 +675,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("row_group_length"); ok { // likely only useful for target - s3s.RowGroupLength = aws.Int64(int64(v.(int))) + s3s.RowGroupLength = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("server_side_encryption_kms_key_id"); ok && target { // target diff --git a/internal/service/dms/service_endpoint_resolver_gen.go b/internal/service/dms/service_endpoint_resolver_gen.go index 0dd3f0e70a2..efa8af3ae75 100644 --- a/internal/service/dms/service_endpoint_resolver_gen.go +++ b/internal/service/dms/service_endpoint_resolver_gen.go @@ -6,65 +6,63 @@ import ( "context" "fmt" "net" - "net/url" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -var _ endpoints_sdkv1.Resolver = resolverSDKv1{} +var _ databasemigrationservice_sdkv2.EndpointResolverV2 = resolverSDKv2{} -type resolverSDKv1 struct { - ctx context.Context +type resolverSDKv2 struct { + defaultResolver databasemigrationservice_sdkv2.EndpointResolverV2 } -func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { - return resolverSDKv1{ - ctx: ctx, +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: databasemigrationservice_sdkv2.NewDefaultEndpointResolverV2(), } } -func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint 
endpoints_sdkv1.ResolvedEndpoint, err error) { - ctx := r.ctx +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params databasemigrationservice_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) - var opt endpoints_sdkv1.Options - opt.Set(opts...) - - useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) - defaultResolver := endpoints_sdkv1.DefaultResolver() + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } - if useFIPS { + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) - endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) if err != nil { return endpoint, err } tflog.Debug(ctx, "endpoint resolved", map[string]any{ - "tf_aws.endpoint": endpoint.URL, + "tf_aws.endpoint": endpoint.URI.String(), }) - var endpointURL *url.URL - endpointURL, err = url.Parse(endpoint.URL) - if err != nil { - return endpoint, err - } - - hostname := endpointURL.Hostname() + hostname := endpoint.URI.Hostname() _, err = net.LookupHost(hostname) if err != nil { if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ "tf_aws.hostname": hostname, }) - opts = append(opts, func(o *endpoints_sdkv1.Options) { - o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - }) + params.UseFIPS = aws_sdkv2.Bool(false) } else { - err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up databasemigrationservice endpoint %q: %s", hostname, err) return } } else { @@ -72,5 +70,13 @@ func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoin } } - return defaultResolver.EndpointFor(service, region, opts...) 
+ return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*databasemigrationservice_sdkv2.Options) { + return func(o *databasemigrationservice_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } } diff --git a/internal/service/dms/service_endpoints_gen_test.go b/internal/service/dms/service_endpoints_gen_test.go index de0605363bb..3491fffc9bb 100644 --- a/internal/service/dms/service_endpoints_gen_test.go +++ b/internal/service/dms/service_endpoints_gen_test.go @@ -4,18 +4,22 @@ package dms_test import ( "context" + "errors" "fmt" "maps" "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -353,54 +357,63 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := databasemigrationservice_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(databasemigrationservice_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), databasemigrationservice_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func defaultFIPSEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := databasemigrationservice_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(databasemigrationservice_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), databasemigrationservice_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.DMSConn(ctx) + client := meta.DMSClient(ctx) - req, _ := client.DescribeCertificatesRequest(&databasemigrationservice_sdkv1.DescribeCertificatesInput{}) + var result apiCallParams - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.DescribeCertificates(ctx, &databasemigrationservice_sdkv2.DescribeCertificatesInput{}, + func(opts *databasemigrationservice_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an 
error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -623,6 +636,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/dms/service_package_gen.go b/internal/service/dms/service_package_gen.go index ea9ef5d38a4..66ddfdbc8ac 100644 --- a/internal/service/dms/service_package_gen.go +++ b/internal/service/dms/service_package_gen.go @@ -5,10 +5,8 @@ package dms import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 
"github.com/aws/aws-sdk-go-v2/aws" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -27,24 +25,29 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceCertificate, + Factory: dataSourceCertificate, TypeName: "aws_dms_certificate", + Name: "Certificate", }, { - Factory: DataSourceEndpoint, + Factory: dataSourceEndpoint, TypeName: "aws_dms_endpoint", + Name: "Endpoint", }, { - Factory: DataSourceReplicationInstance, + Factory: dataSourceReplicationInstance, TypeName: "aws_dms_replication_instance", + Name: "Replication Instance", }, { - Factory: DataSourceReplicationSubnetGroup, + Factory: dataSourceReplicationSubnetGroup, TypeName: "aws_dms_replication_subnet_group", + Name: "Replication Subnet Group", }, { - Factory: DataSourceReplicationTask, + Factory: dataSourceReplicationTask, TypeName: "aws_dms_replication_task", + Name: "Replication Task", }, } } @@ -52,7 +55,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceCertificate, + Factory: resourceCertificate, TypeName: "aws_dms_certificate", Name: "Certificate", Tags: &types.ServicePackageResourceTags{ @@ -60,7 +63,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceEndpoint, + Factory: resourceEndpoint, TypeName: "aws_dms_endpoint", Name: "Endpoint", Tags: &types.ServicePackageResourceTags{ @@ -68,7 +71,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceEventSubscription, + Factory: resourceEventSubscription, TypeName: "aws_dms_event_subscription", Name: "Event Subscription", Tags: &types.ServicePackageResourceTags{ @@ -76,7 +79,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationConfig, + Factory: resourceReplicationConfig, TypeName: "aws_dms_replication_config", Name: "Replication Config", Tags: &types.ServicePackageResourceTags{ @@ -84,7 +87,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationInstance, + Factory: resourceReplicationInstance, TypeName: "aws_dms_replication_instance", Name: "Replication Instance", Tags: &types.ServicePackageResourceTags{ @@ -92,7 +95,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationSubnetGroup, + Factory: resourceReplicationSubnetGroup, TypeName: "aws_dms_replication_subnet_group", Name: "Replication Subnet Group", Tags: &types.ServicePackageResourceTags{ @@ -100,7 +103,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationTask, + Factory: resourceReplicationTask, TypeName: "aws_dms_replication_task", Name: "Replication Task", Tags: &types.ServicePackageResourceTags{ @@ -108,7 +111,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: 
-			Factory:  ResourceS3Endpoint,
+			Factory:  resourceS3Endpoint,
 			TypeName: "aws_dms_s3_endpoint",
 			Name:     "S3 Endpoint",
 			Tags: &types.ServicePackageResourceTags{
@@ -122,22 +125,14 @@ func (p *servicePackage) ServicePackageName() string {
 	return names.DMS
 }
 
-// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API.
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*databasemigrationservice_sdkv1.DatabaseMigrationService, error) {
-	sess := config[names.AttrSession].(*session_sdkv1.Session)
-
-	cfg := aws_sdkv1.Config{}
-
-	if endpoint := config[names.AttrEndpoint].(string); endpoint != "" {
-		tflog.Debug(ctx, "setting endpoint", map[string]any{
-			"tf_aws.endpoint": endpoint,
-		})
-		cfg.Endpoint = aws_sdkv1.String(endpoint)
-	} else {
-		cfg.EndpointResolver = newEndpointResolverSDKv1(ctx)
-	}
+// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API.
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*databasemigrationservice_sdkv2.Client, error) {
+	cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config))
 
-	return databasemigrationservice_sdkv1.New(sess.Copy(&cfg)), nil
+	return databasemigrationservice_sdkv2.NewFromConfig(cfg,
+		databasemigrationservice_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()),
+		withBaseEndpoint(config[names.AttrEndpoint].(string)),
+	), nil
 }
 
 func ServicePackage(ctx context.Context) conns.ServicePackage {
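With `NewConn` replaced by `NewClient`, code in this package obtains the SDK for Go v2 client through the `DMSClient(ctx)` accessor (as the updated sweepers below do) rather than `DMSConn(ctx)`. The following is a minimal sketch of the consumer-side call pattern, assuming the `dms` and `awstypes` package aliases used elsewhere in this diff; the filter name and endpoint identifier are illustrative placeholders, not values taken from this change.

```go
// Sketch only: reading DMS endpoints with the v2 client obtained from provider meta.
conn := meta.(*conns.AWSClient).DMSClient(ctx)

out, err := conn.DescribeEndpoints(ctx, &dms.DescribeEndpointsInput{
	Filters: []awstypes.Filter{{
		Name:   aws.String("endpoint-id"),
		Values: []string{"example-endpoint"}, // hypothetical identifier
	}},
})
if err != nil {
	return fmt.Errorf("reading DMS Endpoints: %w", err)
}
for _, ep := range out.Endpoints {
	_ = aws.ToString(ep.EndpointArn) // v2 returns value types; string fields remain pointers
}
```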
diff --git a/internal/service/dms/sweep.go b/internal/service/dms/sweep.go
index 85184a1c14e..06a186c4da2 100644
--- a/internal/service/dms/sweep.go
+++ b/internal/service/dms/sweep.go
@@ -7,11 +7,11 @@ import (
 	"fmt"
 	"log"
 
-	"github.com/aws/aws-sdk-go/aws"
-	dms "github.com/aws/aws-sdk-go/service/databasemigrationservice"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/hashicorp/terraform-provider-aws/internal/sweep"
-	"github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1"
+	"github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2"
 )
 
 func RegisterSweepers() {
@@ -56,34 +56,31 @@ func sweepEndpoints(region string) error {
 	if err != nil {
 		return fmt.Errorf("error getting client: %s", err)
 	}
-	conn := client.DMSConn(ctx)
+	conn := client.DMSClient(ctx)
 	input := &dms.DescribeEndpointsInput{}
 	sweepResources := make([]sweep.Sweepable, 0)
 
-	err = conn.DescribeEndpointsPagesWithContext(ctx, input, func(page *dms.DescribeEndpointsOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
+	pages := dms.NewDescribeEndpointsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
+
+		if awsv2.SkipSweepError(err) {
+			log.Printf("[WARN] Skipping DMS Endpoint sweep for %s: %s", region, err)
+			return nil
+		}
+
+		if err != nil {
+			return fmt.Errorf("error listing DMS Endpoints (%s): %w", region, err)
 		}
 
 		for _, v := range page.Endpoints {
-			r := ResourceEndpoint()
+			r := resourceEndpoint()
 			d := r.Data(nil)
-			d.SetId(aws.StringValue(v.EndpointIdentifier))
+			d.SetId(aws.ToString(v.EndpointIdentifier))
 			d.Set("endpoint_arn", v.EndpointArn)
 
 			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
 		}
-
-		return !lastPage
-	})
-
-	if awsv1.SkipSweepError(err) {
-		log.Printf("[WARN] Skipping DMS Endpoint sweep for %s: %s", region, err)
-		return nil
-	}
-
-	if err != nil {
-		return fmt.Errorf("error listing DMS Endpoints (%s): %w", region, err)
 	}
 
 	err = sweep.SweepOrchestrator(ctx, sweepResources)
@@ -101,33 +98,30 @@ func sweepReplicationConfigs(region string) error {
 	if err != nil {
 		return fmt.Errorf("error getting client: %s", err)
 	}
-	conn := client.DMSConn(ctx)
+	conn := client.DMSClient(ctx)
 	input := &dms.DescribeReplicationConfigsInput{}
 	sweepResources := make([]sweep.Sweepable, 0)
 
-	err = conn.DescribeReplicationConfigsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationConfigsOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
+	pages := dms.NewDescribeReplicationConfigsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
+
+		if awsv2.SkipSweepError(err) {
+			log.Printf("[WARN] Skipping DMS Replication Config sweep for %s: %s", region, err)
+			return nil
+		}
+
+		if err != nil {
+			return fmt.Errorf("error listing DMS Replication Configs (%s): %w", region, err)
 		}
 
 		for _, v := range page.ReplicationConfigs {
-			r := ResourceReplicationConfig()
+			r := resourceReplicationConfig()
 			d := r.Data(nil)
-			d.SetId(aws.StringValue(v.ReplicationConfigArn))
+			d.SetId(aws.ToString(v.ReplicationConfigArn))
 
 			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
 		}
-
-		return !lastPage
-	})
-
-	if awsv1.SkipSweepError(err) {
-		log.Printf("[WARN] Skipping DMS Replication Config sweep for %s: %s", region, err)
-		return nil
-	}
-
-	if err != nil {
-		return fmt.Errorf("error listing DMS Replication Configs (%s): %w", region, err)
 	}
 
 	err = sweep.SweepOrchestrator(ctx, sweepResources)
@@ -145,34 +139,31 @@ func sweepReplicationInstances(region string) error {
 	if err != nil {
 		return fmt.Errorf("error getting client: %s", err)
 	}
-	conn := client.DMSConn(ctx)
+	conn := client.DMSClient(ctx)
 	input := &dms.DescribeReplicationInstancesInput{}
 	sweepResources := make([]sweep.Sweepable, 0)
 
-	err = conn.DescribeReplicationInstancesPagesWithContext(ctx, input, func(page *dms.DescribeReplicationInstancesOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
+	pages := dms.NewDescribeReplicationInstancesPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
+
+		if awsv2.SkipSweepError(err) {
+			log.Printf("[WARN] Skipping DMS Replication Instance sweep for %s: %s", region, err)
+			return nil
+		}
+
+		if err != nil {
+			return fmt.Errorf("error listing DMS Replication Instances (%s): %w", region, err)
 		}
 
 		for _, v := range page.ReplicationInstances {
-			r := ResourceReplicationInstance()
+			r := resourceReplicationInstance()
 			d := r.Data(nil)
-			d.SetId(aws.StringValue(v.ReplicationInstanceIdentifier))
+			d.SetId(aws.ToString(v.ReplicationInstanceIdentifier))
 			d.Set("replication_instance_arn", v.ReplicationInstanceArn)
 
 			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
 		}
-
-		return !lastPage
-	})
-
-	if awsv1.SkipSweepError(err) {
-		log.Printf("[WARN] Skipping DMS Replication Instance sweep for %s: %s", region, err)
-		return nil
-	}
-
-	if err != nil {
-		return fmt.Errorf("error listing DMS Replication Instances (%s): %w", region, err)
 	}
 
 	err = sweep.SweepOrchestrator(ctx, sweepResources)
@@ -190,33 +181,30 @@ func sweepReplicationSubnetGroups(region string) error {
 	if err != nil {
 		return fmt.Errorf("error getting client: %s", err)
 	}
-	conn := client.DMSConn(ctx)
+	conn := client.DMSClient(ctx)
 	input := &dms.DescribeReplicationSubnetGroupsInput{}
 	sweepResources := make([]sweep.Sweepable, 0)
 
-	err = conn.DescribeReplicationSubnetGroupsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationSubnetGroupsOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
+	pages := dms.NewDescribeReplicationSubnetGroupsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
+
+		if awsv2.SkipSweepError(err) {
+			log.Printf("[WARN] Skipping DMS Replication Subnet Group sweep for %s: %s", region, err)
+			return nil
+		}
+
+		if err != nil {
+			return fmt.Errorf("error listing DMS Replication Subnet Groups (%s): %w", region, err)
 		}
 
 		for _, v := range page.ReplicationSubnetGroups {
-			r := ResourceReplicationSubnetGroup()
+			r := resourceReplicationSubnetGroup()
 			d := r.Data(nil)
-			d.SetId(aws.StringValue(v.ReplicationSubnetGroupIdentifier))
+			d.SetId(aws.ToString(v.ReplicationSubnetGroupIdentifier))
 
 			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
 		}
-
-		return !lastPage
-	})
-
-	if awsv1.SkipSweepError(err) {
-		log.Printf("[WARN] Skipping DMS Replication Subnet Group sweep for %s: %s", region, err)
-		return nil
-	}
-
-	if err != nil {
-		return fmt.Errorf("error listing DMS Replication Subnet Groups (%s): %w", region, err)
 	}
 
 	err = sweep.SweepOrchestrator(ctx, sweepResources)
@@ -234,36 +222,33 @@ func sweepReplicationTasks(region string) error {
 	if err != nil {
 		return fmt.Errorf("error getting client: %s", err)
 	}
-	conn := client.DMSConn(ctx)
+	conn := client.DMSClient(ctx)
 	input := &dms.DescribeReplicationTasksInput{
 		WithoutSettings: aws.Bool(true),
 	}
 	sweepResources := make([]sweep.Sweepable, 0)
 
-	err = conn.DescribeReplicationTasksPagesWithContext(ctx, input, func(page *dms.DescribeReplicationTasksOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
+	pages := dms.NewDescribeReplicationTasksPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
+
+		if awsv2.SkipSweepError(err) {
+			log.Printf("[WARN] Skipping DMS Replication Task sweep for %s: %s", region, err)
+			return nil
+		}
+
+		if err != nil {
+			return fmt.Errorf("error listing DMS Replication Tasks (%s): %w", region, err)
 		}
 
 		for _, v := range page.ReplicationTasks {
-			r := ResourceReplicationTask()
+			r := resourceReplicationTask()
 			d := r.Data(nil)
-			d.SetId(aws.StringValue(v.ReplicationTaskIdentifier))
+			d.SetId(aws.ToString(v.ReplicationTaskIdentifier))
 			d.Set("replication_task_arn", v.ReplicationTaskArn)
 
 			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
 		}
-
-		return !lastPage
-	})
-
-	if awsv1.SkipSweepError(err) {
-		log.Printf("[WARN] Skipping DMS Replication Task sweep for %s: %s", region, err)
-		return nil
-	}
-
-	if err != nil {
-		return fmt.Errorf("error listing DMS Replication Tasks (%s): %w", region, err)
 	}
 
 	err = sweep.SweepOrchestrator(ctx, sweepResources)
diff --git a/internal/service/dms/tags_gen.go b/internal/service/dms/tags_gen.go
index 48393cf0070..b02092634a2 100644
--- a/internal/service/dms/tags_gen.go
+++ b/internal/service/dms/tags_gen.go
@@ -5,9 +5,9 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
-	"github.com/aws/aws-sdk-go/service/databasemigrationservice/databasemigrationserviceiface"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types"
 	"github.com/hashicorp/terraform-plugin-log/tflog"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
 	"github.com/hashicorp/terraform-provider-aws/internal/logging"
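The new `awstypes` alias points at the service's value-type shapes: where the v1 code passed `[]*databasemigrationservice.Tag`, the v2 code passes `[]awstypes.Tag` values whose `Key` and `Value` fields remain `*string`, hence the switch from `aws.StringValue` to the nil-safe `aws.ToString`. A small illustrative sketch follows; the tag key and value are made up.

```go
// Sketch: v2 tag shapes are value structs with pointer fields.
tag := awstypes.Tag{
	Key:   aws.String("Environment"), // hypothetical tag key
	Value: aws.String("test"),        // hypothetical tag value
}
fmt.Println(aws.ToString(tag.Key), aws.ToString(tag.Value)) // aws.ToString returns "" for nil
```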
@@ -19,12 +19,12 @@ import (
 // listTags lists dms service tags.
 // The identifier is typically the Amazon Resource Name (ARN), although
 // it may also be a different identifier depending on the service.
-func listTags(ctx context.Context, conn databasemigrationserviceiface.DatabaseMigrationServiceAPI, identifier string) (tftags.KeyValueTags, error) {
+func listTags(ctx context.Context, conn *databasemigrationservice.Client, identifier string, optFns ...func(*databasemigrationservice.Options)) (tftags.KeyValueTags, error) {
 	input := &databasemigrationservice.ListTagsForResourceInput{
 		ResourceArn: aws.String(identifier),
 	}
 
-	output, err := conn.ListTagsForResourceWithContext(ctx, input)
+	output, err := conn.ListTagsForResource(ctx, input, optFns...)
 
 	if err != nil {
 		return tftags.New(ctx, nil), err
@@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn databasemigrationserviceiface.DatabaseMi
 // ListTags lists dms service tags and set them in Context.
 // It is called from outside this package.
 func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error {
-	tags, err := listTags(ctx, meta.(*conns.AWSClient).DMSConn(ctx), identifier)
+	tags, err := listTags(ctx, meta.(*conns.AWSClient).DMSClient(ctx), identifier)
 
 	if err != nil {
 		return err
@@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri
 // []*SERVICE.Tag handling
 
 // Tags returns dms service tags.
-func Tags(tags tftags.KeyValueTags) []*databasemigrationservice.Tag {
-	result := make([]*databasemigrationservice.Tag, 0, len(tags))
+func Tags(tags tftags.KeyValueTags) []awstypes.Tag {
+	result := make([]awstypes.Tag, 0, len(tags))
 
 	for k, v := range tags.Map() {
-		tag := &databasemigrationservice.Tag{
+		tag := awstypes.Tag{
 			Key:   aws.String(k),
 			Value: aws.String(v),
 		}
@@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*databasemigrationservice.Tag {
 }
 
 // KeyValueTags creates tftags.KeyValueTags from databasemigrationservice service tags.
-func KeyValueTags(ctx context.Context, tags []*databasemigrationservice.Tag) tftags.KeyValueTags {
+func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags {
 	m := make(map[string]*string, len(tags))
 
 	for _, tag := range tags {
-		m[aws.StringValue(tag.Key)] = tag.Value
+		m[aws.ToString(tag.Key)] = tag.Value
 	}
 
 	return tftags.New(ctx, m)
@@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*databasemigrationservice.Tag) tft
 
 // getTagsIn returns dms service tags from Context.
 // nil is returned if there are no input tags.
-func getTagsIn(ctx context.Context) []*databasemigrationservice.Tag {
+func getTagsIn(ctx context.Context) []awstypes.Tag {
 	if inContext, ok := tftags.FromContext(ctx); ok {
 		if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 {
 			return tags
@@ -91,7 +91,7 @@ ...
 }
 
 // setTagsOut sets dms service tags in Context.
-func setTagsOut(ctx context.Context, tags []*databasemigrationservice.Tag) {
+func setTagsOut(ctx context.Context, tags []awstypes.Tag) {
 	if inContext, ok := tftags.FromContext(ctx); ok {
 		inContext.TagsOut = option.Some(KeyValueTags(ctx, tags))
 	}
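Because the regenerated helpers now take variadic `optFns ...func(*databasemigrationservice.Options)`, callers could in principle override client options for a single tagging call. This is a hedged sketch of that possibility rather than something this diff exercises; `arn` and the region value are placeholders.

```go
// Sketch: per-call option override when listing tags for a resource in another Region.
tags, err := listTags(ctx, conn, arn, func(o *databasemigrationservice.Options) {
	o.Region = "us-west-2" // hypothetical override
})
if err != nil {
	return fmt.Errorf("listing tags for %s: %w", arn, err)
}
_ = tags
```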
@@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*databasemigrationservice.Tag) {
 // updateTags updates dms service tags.
 // The identifier is typically the Amazon Resource Name (ARN), although
 // it may also be a different identifier depending on the service.
-func updateTags(ctx context.Context, conn databasemigrationserviceiface.DatabaseMigrationServiceAPI, identifier string, oldTagsMap, newTagsMap any) error {
+func updateTags(ctx context.Context, conn *databasemigrationservice.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*databasemigrationservice.Options)) error {
 	oldTags := tftags.New(ctx, oldTagsMap)
 	newTags := tftags.New(ctx, newTagsMap)
 
@@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn databasemigrationserviceiface.Database
 	if len(removedTags) > 0 {
 		input := &databasemigrationservice.RemoveTagsFromResourceInput{
 			ResourceArn: aws.String(identifier),
-			TagKeys:     aws.StringSlice(removedTags.Keys()),
+			TagKeys:     removedTags.Keys(),
 		}
 
-		_, err := conn.RemoveTagsFromResourceWithContext(ctx, input)
+		_, err := conn.RemoveTagsFromResource(ctx, input, optFns...)
 
 		if err != nil {
 			return fmt.Errorf("untagging resource (%s): %w", identifier, err)
@@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn databasemigrationserviceiface.Database
 			Tags:        Tags(updatedTags),
 		}
 
-		_, err := conn.AddTagsToResourceWithContext(ctx, input)
+		_, err := conn.AddTagsToResource(ctx, input, optFns...)
 
 		if err != nil {
 			return fmt.Errorf("tagging resource (%s): %w", identifier, err)
@@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn databasemigrationserviceiface.Database
 // UpdateTags updates dms service tags.
 // It is called from outside this package.
 func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error {
-	return updateTags(ctx, meta.(*conns.AWSClient).DMSConn(ctx), identifier, oldTags, newTags)
+	return updateTags(ctx, meta.(*conns.AWSClient).DMSClient(ctx), identifier, oldTags, newTags)
 }
diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl
index 928c0c8fe83..ddc66e924b1 100644
--- a/names/data/names_data.hcl
+++ b/names/data/names_data.hcl
@@ -2915,7 +2915,7 @@ service "dms" {
 
   sdk {
     id             = "Database Migration Service"
-    client_version = [1]
+    client_version = [2]
   }
 
   names {