diff --git a/go.mod b/go.mod index 19b11f36acb..4283301dd4f 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.4 github.com/aws/aws-sdk-go-v2/service/athena v1.40.4 github.com/aws/aws-sdk-go-v2/service/auditmanager v1.32.4 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.5 github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.4 github.com/aws/aws-sdk-go-v2/service/batch v1.36.1 github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.4 diff --git a/go.sum b/go.sum index c3ad6eb718b..41ceb14a4bb 100644 --- a/go.sum +++ b/go.sum @@ -72,6 +72,8 @@ github.com/aws/aws-sdk-go-v2/service/athena v1.40.4 h1:tiHIjFXSyb5DbNfnu3ql2r86s github.com/aws/aws-sdk-go-v2/service/athena v1.40.4/go.mod h1:6OHesqDfYPNzYI+VaXtmylYLyppuUy9SwRk4CH/pQA4= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.32.4 h1:45+KYpnG8ZKoqLkQSIg8hnU52rbBRyIYHqaSf+02P3I= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.32.4/go.mod h1:WHURzIps29VZSUz9jxpGeShOhGuf/SqQmNJLs3Ytfns= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.5 h1:vhdJymxlWS2qftzLiuCjSswjXBRLGfzo/BEE9LDveBA= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.5/go.mod h1:ZErgk/bPaaZIpj+lUWGlwI1A0UFhSIscgnCPzTLnb2s= github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.4 h1:LIQN+2GGZHwWksR6cVQtEF7xdLZUEsmUqy+4fStrZcE= github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.4/go.mod h1:5orNWB4auLR1UJ6MUrRpAwne7uZ84Y6cq/0sB6F6LA4= github.com/aws/aws-sdk-go-v2/service/batch v1.36.1 h1:OY9+Dt4FkK6q2VHKAB8zY4nEKOhkziVOtxCPnS94leM= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 28421423fa8..705c2ecbdd7 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -18,6 +18,7 @@ import ( apprunner_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apprunner" athena_sdkv2 "github.com/aws/aws-sdk-go-v2/service/athena" auditmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/auditmanager" + autoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscaling" autoscalingplans_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscalingplans" batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" bcmdataexports_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bcmdataexports" @@ -159,7 +160,6 @@ import ( appmesh_sdkv1 "github.com/aws/aws-sdk-go/service/appmesh" appstream_sdkv1 "github.com/aws/aws-sdk-go/service/appstream" appsync_sdkv1 "github.com/aws/aws-sdk-go/service/appsync" - autoscaling_sdkv1 "github.com/aws/aws-sdk-go/service/autoscaling" backup_sdkv1 "github.com/aws/aws-sdk-go/service/backup" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" @@ -333,8 +333,8 @@ func (c *AWSClient) AuditManagerClient(ctx context.Context) *auditmanager_sdkv2. 
return errs.Must(client[*auditmanager_sdkv2.Client](ctx, c, names.AuditManager, make(map[string]any))) } -func (c *AWSClient) AutoScalingConn(ctx context.Context) *autoscaling_sdkv1.AutoScaling { - return errs.Must(conn[*autoscaling_sdkv1.AutoScaling](ctx, c, names.AutoScaling, make(map[string]any))) +func (c *AWSClient) AutoScalingClient(ctx context.Context) *autoscaling_sdkv2.Client { + return errs.Must(client[*autoscaling_sdkv2.Client](ctx, c, names.AutoScaling, make(map[string]any))) } func (c *AWSClient) AutoScalingPlansClient(ctx context.Context) *autoscalingplans_sdkv2.Client { diff --git a/internal/flex/flex.go b/internal/flex/flex.go index d77da8d1281..79407d007a3 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -239,6 +239,14 @@ func FlattenInt64Set(list []*int64) *schema.Set { return schema.NewSet(schema.HashInt, FlattenInt64List(list)) } +// Takes the result of flatmap.Expand for an array of int32 +// and returns a []int32 +func ExpandInt32ValueList(configured []interface{}) []int32 { + return tfslices.ApplyToAll(configured, func(v any) int32 { + return int32(v.(int)) + }) +} + // Takes the result of flatmap.Expand for an array of int64 // and returns a []*int64 func ExpandInt64List(configured []interface{}) []*int64 { @@ -358,6 +366,11 @@ func IntValueToString(v int) *string { return aws.String(strconv.Itoa(v)) } +// Int32ToStringValue converts an int32 pointer to a Go string value. +func Int32ToStringValue(v *int32) string { + return strconv.FormatInt(int64(aws.Int32Value(v)), 10) +} + // Int64ToStringValue converts an int64 pointer to a Go string value. func Int64ToStringValue(v *int64) string { return strconv.FormatInt(aws.Int64Value(v), 10) diff --git a/internal/generate/tags/main.go b/internal/generate/tags/main.go index 7edaa4ace8f..0e745b3e1fa 100644 --- a/internal/generate/tags/main.go +++ b/internal/generate/tags/main.go @@ -330,6 +330,7 @@ func main() { SkipTypesImp: *skipTypesImp, TfLogPkg: *updateTags, TfResourcePkg: *getTag || *waitForPropagation || *retryTagsListTagsType != "", + TfSlicesPkg: *serviceTagsSlice && *tagTypeIDElem != "" && *tagTypeAddBoolElem != "", TimePkg: *waitForPropagation || *retryTagsListTagsType != "", CreateTagsFunc: createTagsFunc, diff --git a/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl b/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl index 212e5213dbd..b2d56378502 100644 --- a/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl @@ -1,29 +1,22 @@ // []*SERVICE.Tag handling {{ if and ( .TagTypeIDElem ) ( .TagTypeAddBoolElem ) }} -// ListOfMap returns a list of {{ .ServicePackage }} in flattened map. +// listOfMap returns a list of {{ .ServicePackage }} tags in a flattened map. // // Compatible with setting Terraform state for strongly typed configuration blocks. // // This function strips tag resource identifier and type. Generally, this is // the desired behavior so the tag schema does not require those attributes. -// Use (tftags.KeyValueTags).ListOfMap() for full tag information.
-func ListOfMap(tags tftags.KeyValueTags) []any { - var result []any - - for _, key := range tags.Keys() { - m := map[string]any{ +func listOfMap(tags tftags.KeyValueTags) []any { + return tfslices.ApplyToAll(tags.Keys(), func (key string) any { + return map[string]any{ "key": key, "value": aws.ToString(tags.KeyValue(key)), - {{ if .TagTypeAddBoolElem }} + {{- if .TagTypeAddBoolElem }} "{{ .TagTypeAddBoolElemSnake }}": aws.ToBool(tags.KeyAdditionalBoolValue(key, "{{ .TagTypeAddBoolElem }}")), {{ end }} } - - result = append(result, m) - } - - return result + }) } {{- end }} diff --git a/internal/service/autoscaling/attachment.go b/internal/service/autoscaling/attachment.go index 5a2e72cf794..b861140bdcf 100644 --- a/internal/service/autoscaling/attachment.go +++ b/internal/service/autoscaling/attachment.go @@ -8,8 +8,9 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -19,8 +20,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_autoscaling_attachment") -func ResourceAttachment() *schema.Resource { +// @SDKResource("aws_autoscaling_attachment", name="Attachment") +func resourceAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAttachmentCreate, ReadWithoutTimeout: resourceAttachmentRead, @@ -50,19 +51,19 @@ func ResourceAttachment() *schema.Resource { func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) asgName := d.Get("autoscaling_group_name").(string) if v, ok := d.GetOk("elb"); ok { lbName := v.(string) input := &autoscaling.AttachLoadBalancersInput{ AutoScalingGroupName: aws.String(asgName), - LoadBalancerNames: aws.StringSlice([]string{lbName}), + LoadBalancerNames: []string{lbName}, } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.AttachLoadBalancersWithContext(ctx, input) + return conn.AttachLoadBalancers(ctx, input) }, // ValidationError: Trying to update too many Load Balancers/Target Groups at once. 
The limit is 10 errCodeValidationError, "update too many") @@ -71,19 +72,20 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "attaching Auto Scaling Group (%s) load balancer (%s): %s", asgName, lbName, err) } } else { + lbTargetGroupARN := d.Get("lb_target_group_arn").(string) input := &autoscaling.AttachLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(asgName), - TargetGroupARNs: aws.StringSlice([]string{d.Get("lb_target_group_arn").(string)}), + TargetGroupARNs: []string{lbTargetGroupARN}, } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.AttachLoadBalancerTargetGroupsWithContext(ctx, input) + return conn.AttachLoadBalancerTargetGroups(ctx, input) }, errCodeValidationError, "update too many") if err != nil { - return sdkdiag.AppendErrorf(diags, "attaching Auto Scaling Group (%s) target group (%s): %s", asgName, d.Get("lb_target_group_arn").(string), err) + return sdkdiag.AppendErrorf(diags, "attaching Auto Scaling Group (%s) target group (%s): %s", asgName, lbTargetGroupARN, err) } } @@ -95,15 +97,15 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) asgName := d.Get("autoscaling_group_name").(string) var err error if v, ok := d.GetOk("elb"); ok { - err = FindAttachmentByLoadBalancerName(ctx, conn, asgName, v.(string)) + err = findAttachmentByLoadBalancerName(ctx, conn, asgName, v.(string)) } else { - err = FindAttachmentByTargetGroupARN(ctx, conn, asgName, d.Get("lb_target_group_arn").(string)) + err = findAttachmentByTargetGroupARN(ctx, conn, asgName, d.Get("lb_target_group_arn").(string)) } if !d.IsNewResource() && tfresource.NotFound(err) { @@ -121,54 +123,59 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) asgName := d.Get("autoscaling_group_name").(string) if v, ok := d.GetOk("elb"); ok { lbName := v.(string) input := &autoscaling.DetachLoadBalancersInput{ AutoScalingGroupName: aws.String(asgName), - LoadBalancerNames: aws.StringSlice([]string{lbName}), + LoadBalancerNames: []string{lbName}, } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.DetachLoadBalancersWithContext(ctx, input) + return conn.DetachLoadBalancers(ctx, input) }, errCodeValidationError, "update too many") + if tfawserr.ErrMessageContains(err, errCodeValidationError, "Trying to remove Load Balancers that are not part of the group") { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "detaching Auto Scaling Group (%s) load balancer (%s): %s", asgName, lbName, err) } } else { + lbTargetGroupARN := d.Get("lb_target_group_arn").(string) input := &autoscaling.DetachLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(asgName), - TargetGroupARNs: aws.StringSlice([]string{d.Get("lb_target_group_arn").(string)}), + TargetGroupARNs: []string{lbTargetGroupARN}, } _, err := 
tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.DetachLoadBalancerTargetGroupsWithContext(ctx, input) + return conn.DetachLoadBalancerTargetGroups(ctx, input) }, errCodeValidationError, "update too many") if err != nil { - return sdkdiag.AppendErrorf(diags, "detaching Auto Scaling Group (%s) target group (%s): %s", asgName, d.Get("lb_target_group_arn").(string), err) + return sdkdiag.AppendErrorf(diags, "detaching Auto Scaling Group (%s) target group (%s): %s", asgName, lbTargetGroupARN, err) } } return diags } -func FindAttachmentByLoadBalancerName(ctx context.Context, conn *autoscaling.AutoScaling, asgName, loadBalancerName string) error { - asg, err := FindGroupByName(ctx, conn, asgName) +func findAttachmentByLoadBalancerName(ctx context.Context, conn *autoscaling.Client, asgName, loadBalancerName string) error { + asg, err := findGroupByName(ctx, conn, asgName) if err != nil { return err } for _, v := range asg.LoadBalancerNames { - if aws.StringValue(v) == loadBalancerName { + if v == loadBalancerName { return nil } } @@ -178,15 +185,15 @@ func FindAttachmentByLoadBalancerName(ctx context.Context, conn *autoscaling.Aut } } -func FindAttachmentByTargetGroupARN(ctx context.Context, conn *autoscaling.AutoScaling, asgName, targetGroupARN string) error { - asg, err := FindGroupByName(ctx, conn, asgName) +func findAttachmentByTargetGroupARN(ctx context.Context, conn *autoscaling.Client, asgName, targetGroupARN string) error { + asg, err := findGroupByName(ctx, conn, asgName) if err != nil { return err } for _, v := range asg.TargetGroupARNs { - if aws.StringValue(v) == targetGroupARN { + if v == targetGroupARN { return nil } } diff --git a/internal/service/autoscaling/attachment_test.go b/internal/service/autoscaling/attachment_test.go index cdd2586dc97..4896d59ac72 100644 --- a/internal/service/autoscaling/attachment_test.go +++ b/internal/service/autoscaling/attachment_test.go @@ -120,9 +120,32 @@ func TestAccAutoScalingAttachment_multipleALBTargetGroups(t *testing.T) { }) } +func TestAccAutoScalingAttachment_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_autoscaling_attachment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAttachmentConfig_elb(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAttachmentByLoadBalancerNameExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfautoscaling.ResourceAttachment(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_attachment" { @@ -159,7 +182,7 @@ func testAccCheckAttachmentByLoadBalancerNameExists(ctx context.Context, n strin return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) return tfautoscaling.FindAttachmentByLoadBalancerName(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.Attributes["elb"]) } @@ -172,7 +195,7 @@ func testAccCheckAttachmentByTargetGroupARNExists(ctx context.Context, n string) return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) return tfautoscaling.FindAttachmentByTargetGroupARN(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.Attributes["lb_target_group_arn"]) } diff --git a/internal/service/autoscaling/consts.go b/internal/service/autoscaling/consts.go index 58c292e72df..3edf0cf99c7 100644 --- a/internal/service/autoscaling/consts.go +++ b/internal/service/autoscaling/consts.go @@ -16,15 +16,15 @@ const ( ) const ( - DefaultEnabledMetricsGranularity = "1Minute" + defaultEnabledMetricsGranularity = "1Minute" ) const ( - DefaultTerminationPolicy = "Default" + defaultTerminationPolicy = "Default" ) const ( - DefaultWarmPoolMaxGroupPreparedCapacity = -1 + defaultWarmPoolMaxGroupPreparedCapacity = -1 ) const ( @@ -48,33 +48,37 @@ const ( LoadBalancerTargetGroupStateRemoved = "Removed" ) +type desiredCapacityType string + const ( - DesiredCapacityTypeMemoryMiB = "memory-mib" - DesiredCapacityTypeUnits = "units" - DesiredCapacityTypeVCPU = "vcpu" + desiredCapacityTypeMemoryMiB desiredCapacityType = "memory-mib" + desiredCapacityTypeUnits desiredCapacityType = "units" + desiredCapacityTypeVCPU desiredCapacityType = "vcpu" ) -func DesiredCapacityType_Values() []string { - return []string{ - DesiredCapacityTypeMemoryMiB, - DesiredCapacityTypeUnits, - DesiredCapacityTypeVCPU, +func (desiredCapacityType) Values() []desiredCapacityType { + return []desiredCapacityType{ + desiredCapacityTypeMemoryMiB, + desiredCapacityTypeUnits, + desiredCapacityTypeVCPU, } } +type policyType string + const ( - PolicyTypePredictiveScaling = "PredictiveScaling" - PolicyTypeSimpleScaling = "SimpleScaling" - PolicyTypeStepScaling = "StepScaling" - PolicyTypeTargetTrackingScaling = "TargetTrackingScaling" + policyTypePredictiveScaling policyType = "PredictiveScaling" + policyTypeSimpleScaling policyType = "SimpleScaling" + policyTypeStepScaling policyType = "StepScaling" + policyTypeTargetTrackingScaling policyType = "TargetTrackingScaling" ) -func PolicyType_Values() []string { - return []string{ - PolicyTypePredictiveScaling, - PolicyTypeSimpleScaling, - PolicyTypeStepScaling, - PolicyTypeTargetTrackingScaling, +func (policyType) Values() []policyType { + return []policyType{ + policyTypePredictiveScaling, + policyTypeSimpleScaling, + policyTypeStepScaling, + policyTypeTargetTrackingScaling, } } @@ -90,25 +94,29 @@ const ( launchTemplateIDUnknown = "unknown" ) +type lifecycleHookDefaultResult string + const ( - lifecycleHookDefaultResultAbandon = "ABANDON" - lifecycleHookDefaultResultContinue = "CONTINUE" + lifecycleHookDefaultResultAbandon lifecycleHookDefaultResult = "ABANDON" + lifecycleHookDefaultResultContinue lifecycleHookDefaultResult = "CONTINUE" ) -func lifecycleHookDefaultResult_Values() []string { - return []string{ +func (lifecycleHookDefaultResult) Values() []lifecycleHookDefaultResult { + return []lifecycleHookDefaultResult{ lifecycleHookDefaultResultAbandon, lifecycleHookDefaultResultContinue, } } +type lifecycleHookLifecycleTransition string + const ( - lifecycleHookLifecycleTransitionInstanceLaunching = 
"autoscaling:EC2_INSTANCE_LAUNCHING" - lifecycleHookLifecycleTransitionInstanceTerminating = "autoscaling:EC2_INSTANCE_TERMINATING" + lifecycleHookLifecycleTransitionInstanceLaunching lifecycleHookLifecycleTransition = "autoscaling:EC2_INSTANCE_LAUNCHING" + lifecycleHookLifecycleTransitionInstanceTerminating lifecycleHookLifecycleTransition = "autoscaling:EC2_INSTANCE_TERMINATING" ) -func lifecycleHookLifecycleTransition_Values() []string { - return []string{ +func (lifecycleHookLifecycleTransition) Values() []lifecycleHookLifecycleTransition { + return []lifecycleHookLifecycleTransition{ lifecycleHookLifecycleTransitionInstanceLaunching, lifecycleHookLifecycleTransitionInstanceTerminating, } diff --git a/internal/service/autoscaling/errors.go b/internal/service/autoscaling/errors.go index f66ce2e3048..711e5f28a71 100644 --- a/internal/service/autoscaling/errors.go +++ b/internal/service/autoscaling/errors.go @@ -3,6 +3,15 @@ package autoscaling +import ( + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" +) + const ( errCodeValidationError = "ValidationError" ) + +var ( + errCodeResourceInUseFault = (*awstypes.ResourceInUseFault)(nil).ErrorCode() + errCodeScalingActivityInProgressFault = (*awstypes.ScalingActivityInProgressFault)(nil).ErrorCode() +) diff --git a/internal/service/autoscaling/exports_test.go b/internal/service/autoscaling/exports_test.go new file mode 100644 index 00000000000..d4554a11016 --- /dev/null +++ b/internal/service/autoscaling/exports_test.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package autoscaling + +// Exports for use in tests only. +var ( + ResourceAttachment = resourceAttachment + ResourceGroup = resourceGroup + ResourceGroupTag = resourceGroupTag + ResourceLaunchConfiguration = resourceLaunchConfiguration + ResourceLifecycleHook = resourceLifecycleHook + ResourceNotification = resourceNotification + ResourcePolicy = resourcePolicy + ResourceSchedule = resourceSchedule + ResourceTrafficSourceAttachment = resourceTrafficSourceAttachment + + FindAttachmentByLoadBalancerName = findAttachmentByLoadBalancerName + FindAttachmentByTargetGroupARN = findAttachmentByTargetGroupARN + FindGroupByName = findGroupByName + FindInstanceRefreshes = findInstanceRefreshes + FindLaunchConfigurationByName = findLaunchConfigurationByName + FindLifecycleHookByTwoPartKey = findLifecycleHookByTwoPartKey + FindNotificationsByTwoPartKey = findNotificationsByTwoPartKey + FindScalingPolicyByTwoPartKey = findScalingPolicyByTwoPartKey + FindScheduleByTwoPartKey = findScheduleByTwoPartKey + FindTag = findTag + FindTrafficSourceAttachmentByThreePartKey = findTrafficSourceAttachmentByThreePartKey +) diff --git a/internal/service/autoscaling/flex.go b/internal/service/autoscaling/flex.go deleted file mode 100644 index 28c41aab07f..00000000000 --- a/internal/service/autoscaling/flex.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package autoscaling - -import ( - "fmt" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" -) - -// Takes the result of flatmap.Expand for an array of step adjustments and -// returns a []*autoscaling.StepAdjustment. -func ExpandStepAdjustments(configured []interface{}) ([]*autoscaling.StepAdjustment, error) { - var adjustments []*autoscaling.StepAdjustment - - // Loop over our configured step adjustments and create an array - // of aws-sdk-go compatible objects. 
We're forced to convert strings - // to floats here because there's no way to detect whether or not - // an uninitialized, optional schema element is "0.0" deliberately. - // With strings, we can test for "", which is definitely an empty - // struct value. - for _, raw := range configured { - data := raw.(map[string]interface{}) - a := &autoscaling.StepAdjustment{ - ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))), - } - if data["metric_interval_lower_bound"] != "" { - bound := data["metric_interval_lower_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, fmt.Errorf( - "metric_interval_lower_bound must be a float value represented as a string") - } - a.MetricIntervalLowerBound = aws.Float64(f) - default: - return nil, fmt.Errorf( - "metric_interval_lower_bound isn't a string. This is a bug. Please file an issue.") - } - } - if data["metric_interval_upper_bound"] != "" { - bound := data["metric_interval_upper_bound"] - switch bound := bound.(type) { - case string: - f, err := strconv.ParseFloat(bound, 64) - if err != nil { - return nil, fmt.Errorf( - "metric_interval_upper_bound must be a float value represented as a string") - } - a.MetricIntervalUpperBound = aws.Float64(f) - default: - return nil, fmt.Errorf( - "metric_interval_upper_bound isn't a string. This is a bug. Please file an issue.") - } - } - adjustments = append(adjustments, a) - } - - return adjustments, nil -} - -// Flattens step adjustments into a list of map[string]interface. -func FlattenStepAdjustments(adjustments []*autoscaling.StepAdjustment) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(adjustments)) - for _, raw := range adjustments { - a := map[string]interface{}{ - "scaling_adjustment": aws.Int64Value(raw.ScalingAdjustment), - } - if raw.MetricIntervalUpperBound != nil { - a["metric_interval_upper_bound"] = fmt.Sprintf("%g", aws.Float64Value(raw.MetricIntervalUpperBound)) - } - if raw.MetricIntervalLowerBound != nil { - a["metric_interval_lower_bound"] = fmt.Sprintf("%g", aws.Float64Value(raw.MetricIntervalLowerBound)) - } - result = append(result, a) - } - return result -} diff --git a/internal/service/autoscaling/flex_test.go b/internal/service/autoscaling/flex_test.go deleted file mode 100644 index 598de484cc1..00000000000 --- a/internal/service/autoscaling/flex_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package autoscaling - -import ( - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" -) - -func TestExpandStepAdjustments(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - "metric_interval_lower_bound": "1.0", - "metric_interval_upper_bound": "2.0", - "scaling_adjustment": 1, - }, - } - parameters, err := ExpandStepAdjustments(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &autoscaling.StepAdjustment{ - MetricIntervalLowerBound: aws.Float64(1.0), - MetricIntervalUpperBound: aws.Float64(2.0), - ScalingAdjustment: aws.Int64(1), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - -func TestFlattenStepAdjustments(t *testing.T) { - t.Parallel() - - expanded := []*autoscaling.StepAdjustment{ - { - MetricIntervalLowerBound: aws.Float64(1.0), - MetricIntervalUpperBound: aws.Float64(2.5), - ScalingAdjustment: aws.Int64(1), - }, - } - - result := FlattenStepAdjustments(expanded)[0] - if result == nil { - t.Fatal("expected result to have value, but got nil") - } - if result["metric_interval_lower_bound"] != "1" { - t.Fatalf("expected metric_interval_lower_bound to be 1, but got %s", result["metric_interval_lower_bound"]) - } - if result["metric_interval_upper_bound"] != "2.5" { - t.Fatalf("expected metric_interval_upper_bound to be 2.5, but got %s", result["metric_interval_upper_bound"]) - } - if result["scaling_adjustment"] != int64(1) { - t.Fatalf("expected scaling_adjustment to be 1, but got %d", result["scaling_adjustment"]) - } -} diff --git a/internal/service/autoscaling/generate.go b/internal/service/autoscaling/generate.go index 2bd105f5a8b..dc0435f5bec 100644 --- a/internal/service/autoscaling/generate.go +++ b/internal/service/autoscaling/generate.go @@ -1,8 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -GetTag -ListTags -ListTagsOp=DescribeTags -ListTagsOpPaginated -ListTagsInFiltIDName=auto-scaling-group -ServiceTagsSlice -TagOp=CreateOrUpdateTags -TagResTypeElem=ResourceType -TagType2=TagDescription -TagTypeAddBoolElem=PropagateAtLaunch -TagTypeIDElem=ResourceId -UntagOp=DeleteTags -UntagInNeedTagType -UntagInTagsElem=Tags -UpdateTags -//go:generate go run ../../generate/listpages/main.go -ListOps=DescribeInstanceRefreshes,DescribeLoadBalancers,DescribeLoadBalancerTargetGroups,DescribeWarmPool +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -GetTag -GetTagFunc=findTag -ListTags -ListTagsOp=DescribeTags -ListTagsOpPaginated -ListTagsInFiltIDName=auto-scaling-group -ServiceTagsSlice -TagOp=CreateOrUpdateTags -TagResTypeElem=ResourceType -TagType2=TagDescription -TagTypeAddBoolElem=PropagateAtLaunch -TagTypeIDElem=ResourceId -UntagOp=DeleteTags -UntagInNeedTagType -UntagInTagsElem=Tags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/autoscaling/group.go b/internal/service/autoscaling/group.go index 98fbe73b36e..27eafb9b4a9 100644 --- a/internal/service/autoscaling/group.go +++ b/internal/service/autoscaling/group.go @@ -13,11 +13,12 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -27,6 +28,8 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfelb "github.com/hashicorp/terraform-provider-aws/internal/service/elb" @@ -36,8 +39,8 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -// @SDKResource("aws_autoscaling_group") -func ResourceGroup() *schema.Resource { +// @SDKResource("aws_autoscaling_group", name="Group") +func resourceGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceGroupCreate, ReadWithoutTimeout: resourceGroupRead, @@ -97,9 +100,9 @@ func ResourceGroup() *schema.Resource { Computed: true, }, "desired_capacity_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(DesiredCapacityType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[desiredCapacityType](), }, "enabled_metrics": { Type: schema.TypeSet, @@ -138,11 +141,11 @@ func ResourceGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default_result": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(lifecycleHookDefaultResult_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[lifecycleHookDefaultResult](), }, "heartbeat_timeout": { Type: schema.TypeInt, @@ -151,10 +154,10 @@ func ResourceGroup() *schema.Resource { ValidateFunc: validation.IntBetween(30, 7200), }, "lifecycle_transition": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(lifecycleHookLifecycleTransition_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[lifecycleHookLifecycleTransition](), }, "name": { Type: schema.TypeString, @@ -263,10 +266,10 @@ func ResourceGroup() *schema.Resource { ValidateFunc: validation.IntBetween(0, 100), }, "scale_in_protected_instances": { - Type: schema.TypeString, - Optional: true, - Default: autoscaling.ScaleInProtectedInstancesIgnore, - 
ValidateFunc: validation.StringInSlice(autoscaling.ScaleInProtectedInstances_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ScaleInProtectedInstancesIgnore, + ValidateDiagFunc: enum.Validate[awstypes.ScaleInProtectedInstances](), }, "skip_matching": { Type: schema.TypeBool, @@ -274,18 +277,18 @@ func ResourceGroup() *schema.Resource { Default: false, }, "standby_instances": { - Type: schema.TypeString, - Optional: true, - Default: autoscaling.StandbyInstancesIgnore, - ValidateFunc: validation.StringInSlice(autoscaling.StandbyInstances_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.StandbyInstancesIgnore, + ValidateDiagFunc: enum.Validate[awstypes.StandbyInstances](), }, }, }, }, "strategy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(autoscaling.RefreshStrategy_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RefreshStrategy](), }, "triggers": { Type: schema.TypeSet, @@ -352,7 +355,7 @@ func ResourceGroup() *schema.Resource { "metrics_granularity": { Type: schema.TypeString, Optional: true, - Default: DefaultEnabledMetricsGranularity, + Default: defaultEnabledMetricsGranularity, }, "min_elb_capacity": { Type: schema.TypeInt, @@ -485,16 +488,16 @@ func ResourceGroup() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(autoscaling.AcceleratorManufacturer_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AcceleratorManufacturer](), }, }, "accelerator_names": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(autoscaling.AcceleratorName_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AcceleratorName](), }, }, "accelerator_total_memory_mib": { @@ -520,8 +523,8 @@ func ResourceGroup() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(autoscaling.AcceleratorType_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AcceleratorType](), }, }, "allowed_instance_types": { @@ -531,9 +534,9 @@ func ResourceGroup() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "bare_metal": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(autoscaling.BareMetal_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.BareMetal](), }, "baseline_ebs_bandwidth_mbps": { Type: schema.TypeList, @@ -555,16 +558,16 @@ func ResourceGroup() *schema.Resource { }, }, "burstable_performance": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(autoscaling.BurstablePerformance_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.BurstablePerformance](), }, "cpu_manufacturers": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(autoscaling.CpuManufacturer_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.CpuManufacturer](), }, }, "excluded_instance_types": { @@ -577,21 +580,21 @@ func ResourceGroup() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ 
- Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(autoscaling.InstanceGeneration_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.InstanceGeneration](), }, }, "local_storage": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(autoscaling.LocalStorage_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.LocalStorage](), }, "local_storage_types": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(autoscaling.LocalStorageType_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.LocalStorageType](), }, }, "memory_gib_per_vcpu": { @@ -900,7 +903,7 @@ func ResourceGroup() *schema.Resource { "max_group_prepared_capacity": { Type: schema.TypeInt, Optional: true, - Default: DefaultWarmPoolMaxGroupPreparedCapacity, + Default: defaultWarmPoolMaxGroupPreparedCapacity, }, "min_size": { Type: schema.TypeInt, @@ -908,10 +911,10 @@ func ResourceGroup() *schema.Resource { Default: 0, }, "pool_state": { - Type: schema.TypeString, - Optional: true, - Default: autoscaling.WarmPoolStateStopped, - ValidateFunc: validation.StringInSlice(autoscaling.WarmPoolState_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.WarmPoolStateStopped, + ValidateDiagFunc: enum.Validate[awstypes.WarmPoolState](), }, }, }, @@ -996,16 +999,16 @@ func launchTemplateCustomDiff(baseAttribute, subAttribute string) schema.Customi func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) startTime := time.Now() asgName := create.Name(d.Get("name").(string), d.Get("name_prefix").(string)) - createInput := &autoscaling.CreateAutoScalingGroupInput{ + inputCASG := &autoscaling.CreateAutoScalingGroupInput{ AutoScalingGroupName: aws.String(asgName), NewInstancesProtectedFromScaleIn: aws.Bool(d.Get("protect_from_scale_in").(bool)), } - updateInput := &autoscaling.UpdateAutoScalingGroupInput{ + inputUASG := &autoscaling.UpdateAutoScalingGroupInput{ AutoScalingGroupName: aws.String(asgName), } @@ -1017,115 +1020,115 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter desiredCapacity := d.Get("desired_capacity").(int) if twoPhases { - createInput.MaxSize = aws.Int64(0) - createInput.MinSize = aws.Int64(0) + inputCASG.MaxSize = aws.Int32(0) + inputCASG.MinSize = aws.Int32(0) - updateInput.MaxSize = aws.Int64(int64(maxSize)) - updateInput.MinSize = aws.Int64(int64(minSize)) + inputUASG.MaxSize = aws.Int32(int32(maxSize)) + inputUASG.MinSize = aws.Int32(int32(minSize)) if desiredCapacity > 0 { - updateInput.DesiredCapacity = aws.Int64(int64(desiredCapacity)) + inputUASG.DesiredCapacity = aws.Int32(int32(desiredCapacity)) } if v, ok := d.GetOk("desired_capacity_type"); ok { - updateInput.DesiredCapacityType = aws.String(v.(string)) + inputUASG.DesiredCapacityType = aws.String(v.(string)) } } else { - createInput.MaxSize = aws.Int64(int64(maxSize)) - createInput.MinSize = aws.Int64(int64(minSize)) + inputCASG.MaxSize = aws.Int32(int32(maxSize)) + inputCASG.MinSize = aws.Int32(int32(minSize)) if desiredCapacity > 0 { - createInput.DesiredCapacity = aws.Int64(int64(desiredCapacity)) + inputCASG.DesiredCapacity = aws.Int32(int32(desiredCapacity)) } if 
v, ok := d.GetOk("desired_capacity_type"); ok { - createInput.DesiredCapacityType = aws.String(v.(string)) + inputCASG.DesiredCapacityType = aws.String(v.(string)) } } if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { - createInput.AvailabilityZones = flex.ExpandStringSet(v.(*schema.Set)) + inputCASG.AvailabilityZones = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("capacity_rebalance"); ok { - createInput.CapacityRebalance = aws.Bool(v.(bool)) + inputCASG.CapacityRebalance = aws.Bool(v.(bool)) } if v, ok := d.GetOk("context"); ok { - createInput.Context = aws.String(v.(string)) + inputCASG.Context = aws.String(v.(string)) } if v, ok := d.GetOk("default_cooldown"); ok { - createInput.DefaultCooldown = aws.Int64(int64(v.(int))) + inputCASG.DefaultCooldown = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("default_instance_warmup"); ok { - createInput.DefaultInstanceWarmup = aws.Int64(int64(v.(int))) + inputCASG.DefaultInstanceWarmup = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("health_check_type"); ok { - createInput.HealthCheckType = aws.String(v.(string)) + inputCASG.HealthCheckType = aws.String(v.(string)) } if v, ok := d.GetOk("health_check_grace_period"); ok { - createInput.HealthCheckGracePeriod = aws.Int64(int64(v.(int))) + inputCASG.HealthCheckGracePeriod = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("instance_maintenance_policy"); ok { - createInput.InstanceMaintenancePolicy = expandInstanceMaintenancePolicy(v.([]interface{})) + inputCASG.InstanceMaintenancePolicy = expandInstanceMaintenancePolicy(v.([]interface{})) } if v, ok := d.GetOk("launch_configuration"); ok { - createInput.LaunchConfigurationName = aws.String(v.(string)) + inputCASG.LaunchConfigurationName = aws.String(v.(string)) } if v, ok := d.GetOk("launch_template"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - createInput.LaunchTemplate = expandLaunchTemplateSpecification(v.([]interface{})[0].(map[string]interface{}), false) + inputCASG.LaunchTemplate = expandLaunchTemplateSpecification(v.([]interface{})[0].(map[string]interface{}), false) } if v, ok := d.GetOk("load_balancers"); ok && v.(*schema.Set).Len() > 0 { - createInput.LoadBalancerNames = flex.ExpandStringSet(v.(*schema.Set)) + inputCASG.LoadBalancerNames = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("max_instance_lifetime"); ok { - createInput.MaxInstanceLifetime = aws.Int64(int64(v.(int))) + inputCASG.MaxInstanceLifetime = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("mixed_instances_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - createInput.MixedInstancesPolicy = expandMixedInstancesPolicy(v.([]interface{})[0].(map[string]interface{}), true) + inputCASG.MixedInstancesPolicy = expandMixedInstancesPolicy(v.([]interface{})[0].(map[string]interface{}), true) } if v, ok := d.GetOk("placement_group"); ok { - createInput.PlacementGroup = aws.String(v.(string)) + inputCASG.PlacementGroup = aws.String(v.(string)) } if v, ok := d.GetOk("service_linked_role_arn"); ok { - createInput.ServiceLinkedRoleARN = aws.String(v.(string)) + inputCASG.ServiceLinkedRoleARN = aws.String(v.(string)) } if v, ok := d.GetOk("tag"); ok { - createInput.Tags = Tags(KeyValueTags(ctx, v, asgName, TagResourceTypeGroup).IgnoreAWS()) + inputCASG.Tags = Tags(KeyValueTags(ctx, v, asgName, TagResourceTypeGroup).IgnoreAWS()) } if v, ok := d.GetOk("target_group_arns"); ok && len(v.(*schema.Set).List()) > 0 { - createInput.TargetGroupARNs = 
flex.ExpandStringSet(v.(*schema.Set)) + inputCASG.TargetGroupARNs = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 { - createInput.TerminationPolicies = flex.ExpandStringList(v.([]interface{})) + inputCASG.TerminationPolicies = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk("traffic_source"); ok && v.(*schema.Set).Len() > 0 { - createInput.TrafficSources = expandTrafficSourceIdentifiers(v.(*schema.Set).List()) + inputCASG.TrafficSources = expandTrafficSourceIdentifiers(v.(*schema.Set).List()) } if v, ok := d.GetOk("vpc_zone_identifier"); ok && v.(*schema.Set).Len() > 0 { - createInput.VPCZoneIdentifier = expandVPCZoneIdentifiers(v.(*schema.Set).List()) + inputCASG.VPCZoneIdentifier = expandVPCZoneIdentifiers(v.(*schema.Set).List()) } _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateAutoScalingGroupWithContext(ctx, createInput) + return conn.CreateAutoScalingGroup(ctx, inputCASG) }, // ValidationError: You must use a valid fully-formed launch template. Value (tf-acc-test-6643732652421074386) for parameter iamInstanceProfile.name is invalid. Invalid IAM Instance Profile name errCodeValidationError, "Invalid IAM Instance Profile") @@ -1138,9 +1141,12 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter if twoPhases { for _, input := range expandPutLifecycleHookInputs(asgName, initialLifecycleHooks) { - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 5*time.Minute, + const ( + timeout = 5 * time.Minute + ) + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (interface{}, error) { - return conn.PutLifecycleHookWithContext(ctx, input) + return conn.PutLifecycleHook(ctx, input) }, errCodeValidationError, "Unable to publish test message to notification target") @@ -1149,7 +1155,7 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter } } - _, err = conn.UpdateAutoScalingGroupWithContext(ctx, updateInput) + _, err = conn.UpdateAutoScalingGroup(ctx, inputUASG) if err != nil { return sdkdiag.AppendErrorf(diags, "setting Auto Scaling Group (%s) initial capacity: %s", d.Id(), err) @@ -1188,12 +1194,12 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk("suspended_processes"); ok && v.(*schema.Set).Len() > 0 { - input := &autoscaling.ScalingProcessQuery{ + input := &autoscaling.SuspendProcessesInput{ AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: flex.ExpandStringSet(v.(*schema.Set)), + ScalingProcesses: flex.ExpandStringValueSet(v.(*schema.Set)), } - _, err := conn.SuspendProcessesWithContext(ctx, input) + _, err := conn.SuspendProcesses(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "suspending Auto Scaling Group (%s) scaling processes: %s", d.Id(), err) @@ -1204,10 +1210,10 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter input := &autoscaling.EnableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), Granularity: aws.String(d.Get("metrics_granularity").(string)), - Metrics: flex.ExpandStringSet(v.(*schema.Set)), + Metrics: flex.ExpandStringValueSet(v.(*schema.Set)), } - _, err := conn.EnableMetricsCollectionWithContext(ctx, input) + _, err := conn.EnableMetricsCollection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "enabling Auto Scaling Group (%s) metrics collection: %s", d.Id(), err) @@ 
-1215,7 +1221,7 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk("warm_pool"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - _, err := conn.PutWarmPoolWithContext(ctx, expandPutWarmPoolInput(d.Id(), v.([]interface{})[0].(map[string]interface{}))) + _, err := conn.PutWarmPool(ctx, expandPutWarmPoolInput(d.Id(), v.([]interface{})[0].(map[string]interface{}))) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Warm Pool (%s): %s", d.Id(), err) @@ -1227,10 +1233,10 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - g, err := FindGroupByName(ctx, conn, d.Id()) + g, err := findGroupByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Auto Scaling Group %s not found, removing from state", d.Id()) @@ -1243,7 +1249,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa } d.Set("arn", g.AutoScalingGroupARN) - d.Set("availability_zones", aws.StringValueSlice(g.AvailabilityZones)) + d.Set("availability_zones", g.AvailabilityZones) d.Set("capacity_rebalance", g.CapacityRebalance) d.Set("context", g.Context) d.Set("default_cooldown", g.DefaultCooldown) @@ -1255,7 +1261,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set("metrics_granularity", g.EnabledMetrics[0].Granularity) } else { d.Set("enabled_metrics", nil) - d.Set("metrics_granularity", DefaultEnabledMetricsGranularity) + d.Set("metrics_granularity", defaultEnabledMetricsGranularity) } d.Set("health_check_grace_period", g.HealthCheckGracePeriod) d.Set("health_check_type", g.HealthCheckType) @@ -1270,7 +1276,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa } else { d.Set("launch_template", nil) } - d.Set("load_balancers", aws.StringValueSlice(g.LoadBalancerNames)) + d.Set("load_balancers", g.LoadBalancerNames) d.Set("max_instance_lifetime", g.MaxInstanceLifetime) d.Set("max_size", g.MaxSize) d.Set("min_size", g.MinSize) @@ -1282,7 +1288,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set("mixed_instances_policy", nil) } d.Set("name", g.AutoScalingGroupName) - d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(g.AutoScalingGroupName))) + d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(g.AutoScalingGroupName))) d.Set("placement_group", g.PlacementGroup) d.Set("predicted_capacity", g.PredictedCapacity) d.Set("protect_from_scale_in", g.NewInstancesProtectedFromScaleIn) @@ -1291,17 +1297,17 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa if err := d.Set("traffic_source", flattenTrafficSourceIdentifiers(g.TrafficSources)); err != nil { return sdkdiag.AppendErrorf(diags, "setting traffic_source: %s", err) } - d.Set("target_group_arns", aws.StringValueSlice(g.TargetGroupARNs)) + d.Set("target_group_arns", g.TargetGroupARNs) // If no termination polices are explicitly configured and the upstream state // is only using the "Default" policy, clear the state to make it consistent // with the default AWS Create API behavior. 
- if _, ok := d.GetOk("termination_policies"); !ok && len(g.TerminationPolicies) == 1 && aws.StringValue(g.TerminationPolicies[0]) == DefaultTerminationPolicy { + if _, ok := d.GetOk("termination_policies"); !ok && len(g.TerminationPolicies) == 1 && g.TerminationPolicies[0] == defaultTerminationPolicy { d.Set("termination_policies", nil) } else { - d.Set("termination_policies", aws.StringValueSlice(g.TerminationPolicies)) + d.Set("termination_policies", g.TerminationPolicies) } - if len(aws.StringValue(g.VPCZoneIdentifier)) > 0 { - d.Set("vpc_zone_identifier", strings.Split(aws.StringValue(g.VPCZoneIdentifier), ",")) + if len(aws.ToString(g.VPCZoneIdentifier)) > 0 { + d.Set("vpc_zone_identifier", strings.Split(aws.ToString(g.VPCZoneIdentifier), ",")) } else { d.Set("vpc_zone_identifier", nil) } @@ -1314,7 +1320,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa } d.Set("warm_pool_size", g.WarmPoolSize) - if err := d.Set("tag", ListOfMap(KeyValueTags(ctx, g.Tags, d.Id(), TagResourceTypeGroup).IgnoreAWS().IgnoreConfig(ignoreTagsConfig))); err != nil { + if err := d.Set("tag", listOfMap(KeyValueTags(ctx, g.Tags, d.Id(), TagResourceTypeGroup).IgnoreAWS().IgnoreConfig(ignoreTagsConfig))); err != nil { return sdkdiag.AppendErrorf(diags, "setting tag: %s", err) } @@ -1323,7 +1329,7 @@ func resourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) startTime := time.Now() @@ -1346,7 +1352,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter if d.HasChange("availability_zones") { if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { - input.AvailabilityZones = flex.ExpandStringSet(v.(*schema.Set)) + input.AvailabilityZones = flex.ExpandStringValueSet(v.(*schema.Set)) } } @@ -1365,15 +1371,15 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } if d.HasChange("default_cooldown") { - input.DefaultCooldown = aws.Int64(int64(d.Get("default_cooldown").(int))) + input.DefaultCooldown = aws.Int32(int32(d.Get("default_cooldown").(int))) } if d.HasChange("default_instance_warmup") { - input.DefaultInstanceWarmup = aws.Int64(int64(d.Get("default_instance_warmup").(int))) + input.DefaultInstanceWarmup = aws.Int32(int32(d.Get("default_instance_warmup").(int))) } if d.HasChange("desired_capacity") { - input.DesiredCapacity = aws.Int64(int64(d.Get("desired_capacity").(int))) + input.DesiredCapacity = aws.Int32(int32(d.Get("desired_capacity").(int))) shouldWaitForCapacity = true } @@ -1383,11 +1389,11 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } if d.HasChange("health_check_grace_period") { - input.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int))) + input.HealthCheckGracePeriod = aws.Int32(int32(d.Get("health_check_grace_period").(int))) } if d.HasChange("health_check_type") { - input.HealthCheckGracePeriod = aws.Int64(int64(d.Get("health_check_grace_period").(int))) + input.HealthCheckGracePeriod = aws.Int32(int32(d.Get("health_check_grace_period").(int))) input.HealthCheckType = aws.String(d.Get("health_check_type").(string)) } @@ -1410,15 +1416,15 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } if 
d.HasChange("max_instance_lifetime") { - input.MaxInstanceLifetime = aws.Int64(int64(d.Get("max_instance_lifetime").(int))) + input.MaxInstanceLifetime = aws.Int32(int32(d.Get("max_instance_lifetime").(int))) } if d.HasChange("max_size") { - input.MaxSize = aws.Int64(int64(d.Get("max_size").(int))) + input.MaxSize = aws.Int32(int32(d.Get("max_size").(int))) } if d.HasChange("min_size") { - input.MinSize = aws.Int64(int64(d.Get("min_size").(int))) + input.MinSize = aws.Int32(int32(d.Get("min_size").(int))) shouldWaitForCapacity = true } @@ -1441,9 +1447,9 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter // If the termination policy is set to null, we need to explicitly set // it back to "Default", or the API won't reset it for us. if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 { - input.TerminationPolicies = flex.ExpandStringList(v.([]interface{})) + input.TerminationPolicies = flex.ExpandStringValueList(v.([]interface{})) } else { - input.TerminationPolicies = aws.StringSlice([]string{DefaultTerminationPolicy}) + input.TerminationPolicies = []string{defaultTerminationPolicy} } } @@ -1451,7 +1457,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter input.VPCZoneIdentifier = expandVPCZoneIdentifiers(d.Get("vpc_zone_identifier").(*schema.Set).List()) } - _, err := conn.UpdateAutoScalingGroupWithContext(ctx, input) + _, err := conn.UpdateAutoScalingGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Auto Scaling Group (%s): %s", d.Id(), err) @@ -1460,7 +1466,6 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter if d.HasChanges("tag") { oTagRaw, nTagRaw := d.GetChange("tag") - oldTags := Tags(KeyValueTags(ctx, oTagRaw, d.Id(), TagResourceTypeGroup)) newTags := Tags(KeyValueTags(ctx, nTagRaw, d.Id(), TagResourceTypeGroup)) @@ -1477,14 +1482,13 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter if n == nil { n = new(schema.Set) } - os := o.(*schema.Set) ns := n.(*schema.Set) // API only supports adding or removing 10 at a time. batchSize := 10 for _, chunk := range tfslices.Chunks(expandTrafficSourceIdentifiers(os.Difference(ns).List()), batchSize) { - _, err := conn.DetachTrafficSourcesWithContext(ctx, &autoscaling.DetachTrafficSourcesInput{ + _, err := conn.DetachTrafficSources(ctx, &autoscaling.DetachTrafficSourcesInput{ AutoScalingGroupName: aws.String(d.Id()), TrafficSources: chunk, }) @@ -1499,7 +1503,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } for _, chunk := range tfslices.Chunks(expandTrafficSourceIdentifiers(ns.Difference(os).List()), batchSize) { - _, err := conn.AttachTrafficSourcesWithContext(ctx, &autoscaling.AttachTrafficSourcesInput{ + _, err := conn.AttachTrafficSources(ctx, &autoscaling.AttachTrafficSourcesInput{ AutoScalingGroupName: aws.String(d.Id()), TrafficSources: chunk, }) @@ -1527,8 +1531,8 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter // API only supports adding or removing 10 at a time. 
batchSize := 10 - for _, chunk := range tfslices.Chunks(flex.ExpandStringSet(os.Difference(ns)), batchSize) { - _, err := conn.DetachLoadBalancersWithContext(ctx, &autoscaling.DetachLoadBalancersInput{ + for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(os.Difference(ns)), batchSize) { + _, err := conn.DetachLoadBalancers(ctx, &autoscaling.DetachLoadBalancersInput{ AutoScalingGroupName: aws.String(d.Id()), LoadBalancerNames: chunk, }) @@ -1542,8 +1546,8 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } } - for _, chunk := range tfslices.Chunks(flex.ExpandStringSet(ns.Difference(os)), batchSize) { - _, err := conn.AttachLoadBalancersWithContext(ctx, &autoscaling.AttachLoadBalancersInput{ + for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(ns.Difference(os)), batchSize) { + _, err := conn.AttachLoadBalancers(ctx, &autoscaling.AttachLoadBalancersInput{ AutoScalingGroupName: aws.String(d.Id()), LoadBalancerNames: chunk, }) @@ -1571,8 +1575,8 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter // API only supports adding or removing 10 at a time. batchSize := 10 - for _, chunk := range tfslices.Chunks(flex.ExpandStringSet(os.Difference(ns)), batchSize) { - _, err := conn.DetachLoadBalancerTargetGroupsWithContext(ctx, &autoscaling.DetachLoadBalancerTargetGroupsInput{ + for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(os.Difference(ns)), batchSize) { + _, err := conn.DetachLoadBalancerTargetGroups(ctx, &autoscaling.DetachLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(d.Id()), TargetGroupARNs: chunk, }) @@ -1586,8 +1590,8 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } } - for _, chunk := range tfslices.Chunks(flex.ExpandStringSet(ns.Difference(os)), batchSize) { - _, err := conn.AttachLoadBalancerTargetGroupsWithContext(ctx, &autoscaling.AttachLoadBalancerTargetGroupsInput{ + for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(ns.Difference(os)), batchSize) { + _, err := conn.AttachLoadBalancerTargetGroups(ctx, &autoscaling.AttachLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(d.Id()), TargetGroupARNs: chunk, }) @@ -1620,13 +1624,13 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } if shouldRefreshInstances { - var launchTemplate *autoscaling.LaunchTemplateSpecification + var launchTemplate *awstypes.LaunchTemplateSpecification if v, ok := d.GetOk("launch_template"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { launchTemplate = expandLaunchTemplateSpecification(v.([]interface{})[0].(map[string]interface{}), false) } - var mixedInstancesPolicy *autoscaling.MixedInstancesPolicy + var mixedInstancesPolicy *awstypes.MixedInstancesPolicy if v, ok := d.GetOk("mixed_instances_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { mixedInstancesPolicy = expandMixedInstancesPolicy(v.([]interface{})[0].(map[string]interface{}), true) @@ -1649,7 +1653,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter return sdkdiag.AppendFromErr(diags, err) } } else { - _, err := conn.PutWarmPoolWithContext(ctx, expandPutWarmPoolInput(d.Id(), w[0].(map[string]interface{}))) + _, err := conn.PutWarmPool(ctx, expandPutWarmPoolInput(d.Id(), w[0].(map[string]interface{}))) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Auto Scaling Warm Pool (%s): %s", d.Id(), err) @@ -1701,10 +1705,10 @@ func 
resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter if disableMetrics := os.Difference(ns); disableMetrics.Len() != 0 { input := &autoscaling.DisableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), - Metrics: flex.ExpandStringSet(disableMetrics), + Metrics: flex.ExpandStringValueSet(disableMetrics), } - _, err := conn.DisableMetricsCollectionWithContext(ctx, input) + _, err := conn.DisableMetricsCollection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "disabling Auto Scaling Group (%s) metrics collection: %s", d.Id(), err) @@ -1715,10 +1719,10 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter input := &autoscaling.EnableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), Granularity: aws.String(d.Get("metrics_granularity").(string)), - Metrics: flex.ExpandStringSet(enableMetrics), + Metrics: flex.ExpandStringValueSet(enableMetrics), } - _, err := conn.EnableMetricsCollectionWithContext(ctx, input) + _, err := conn.EnableMetricsCollection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "enabling Auto Scaling Group (%s) metrics collection: %s", d.Id(), err) @@ -1738,12 +1742,12 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter ns := n.(*schema.Set) if resumeProcesses := os.Difference(ns); resumeProcesses.Len() != 0 { - input := &autoscaling.ScalingProcessQuery{ + input := &autoscaling.ResumeProcessesInput{ AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: flex.ExpandStringSet(resumeProcesses), + ScalingProcesses: flex.ExpandStringValueSet(resumeProcesses), } - _, err := conn.ResumeProcessesWithContext(ctx, input) + _, err := conn.ResumeProcesses(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "resuming Auto Scaling Group (%s) scaling processes: %s", d.Id(), err) @@ -1751,12 +1755,12 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter } if suspendProcesses := ns.Difference(os); suspendProcesses.Len() != 0 { - input := &autoscaling.ScalingProcessQuery{ + input := &autoscaling.SuspendProcessesInput{ AutoScalingGroupName: aws.String(d.Id()), - ScalingProcesses: flex.ExpandStringSet(suspendProcesses), + ScalingProcesses: flex.ExpandStringValueSet(suspendProcesses), } - _, err := conn.SuspendProcessesWithContext(ctx, input) + _, err := conn.SuspendProcesses(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "suspending Auto Scaling Group (%s) scaling processes: %s", d.Id(), err) @@ -1769,12 +1773,12 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) forceDeleteGroup := d.Get("force_delete").(bool) forceDeleteWarmPool := forceDeleteGroup || d.Get("force_delete_warm_pool").(bool) - group, err := FindGroupByName(ctx, conn, d.Id()) + group, err := findGroupByName(ctx, conn, d.Id()) if tfresource.NotFound(err) { return diags @@ -1803,12 +1807,12 @@ func resourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta inter log.Printf("[DEBUG] Deleting Auto Scaling Group: %s", d.Id()) _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { - return conn.DeleteAutoScalingGroupWithContext(ctx, &autoscaling.DeleteAutoScalingGroupInput{ + return 
conn.DeleteAutoScalingGroup(ctx, &autoscaling.DeleteAutoScalingGroupInput{ AutoScalingGroupName: aws.String(d.Id()), ForceDelete: aws.Bool(forceDeleteGroup), }) }, - autoscaling.ErrCodeResourceInUseFault, autoscaling.ErrCodeScalingActivityInProgressFault) + errCodeResourceInUseFault, errCodeScalingActivityInProgressFault) if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { return diags @@ -1820,7 +1824,7 @@ func resourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta inter _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { - return FindGroupByName(ctx, conn, d.Id()) + return findGroupByName(ctx, conn, d.Id()) }) if err != nil { @@ -1830,16 +1834,16 @@ func resourceGroupDelete(ctx context.Context, d *schema.ResourceData, meta inter return diags } -func drainGroup(ctx context.Context, conn *autoscaling.AutoScaling, name string, instances []*autoscaling.Instance, timeout time.Duration) error { +func drainGroup(ctx context.Context, conn *autoscaling.Client, name string, instances []awstypes.Instance, timeout time.Duration) error { input := &autoscaling.UpdateAutoScalingGroupInput{ AutoScalingGroupName: aws.String(name), - DesiredCapacity: aws.Int64(0), - MinSize: aws.Int64(0), - MaxSize: aws.Int64(0), + DesiredCapacity: aws.Int32(0), + MinSize: aws.Int32(0), + MaxSize: aws.Int32(0), } log.Printf("[DEBUG] Draining Auto Scaling Group: %s", name) - if _, err := conn.UpdateAutoScalingGroupWithContext(ctx, input); err != nil { + if _, err := conn.UpdateAutoScalingGroup(ctx, input); err != nil { return fmt.Errorf("setting Auto Scaling Group (%s) capacity to 0: %w", name, err) } @@ -1854,19 +1858,19 @@ func drainGroup(ctx context.Context, conn *autoscaling.AutoScaling, name string, // Filter by ProtectedFromScaleIn to avoid unnecessary API calls (#36584) var instanceIDs []string for _, instance := range instances { - if aws.BoolValue(instance.ProtectedFromScaleIn) { - instanceIDs = append(instanceIDs, aws.StringValue(instance.InstanceId)) + if aws.ToBool(instance.ProtectedFromScaleIn) { + instanceIDs = append(instanceIDs, aws.ToString(instance.InstanceId)) } } const batchSize = 50 // API limit. for _, chunk := range tfslices.Chunks(instanceIDs, batchSize) { input := &autoscaling.SetInstanceProtectionInput{ AutoScalingGroupName: aws.String(name), - InstanceIds: aws.StringSlice(chunk), + InstanceIds: chunk, ProtectedFromScaleIn: aws.Bool(false), } - _, err := conn.SetInstanceProtectionWithContext(ctx, input) + _, err := conn.SetInstanceProtection(ctx, input) // Ignore ValidationError when instance is already fully terminated // and is not a part of Auto Scaling Group anymore. 
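// Reference sketch (illustration only, not part of the patch): the SDK for Go v2 call
// shape that the drainGroup and SetInstanceProtection hunks above adopt, reduced to a
// self-contained program. The helper name drainToZero, the example group name, and the
// use of config.LoadDefaultConfig are assumptions made for the example. It shows the
// three recurring changes: the context moves onto the call itself (UpdateAutoScalingGroup
// rather than UpdateAutoScalingGroupWithContext), capacity fields become *int32 built
// with aws.Int32, and string-slice parameters are plain []string values.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
)

// drainToZero scales an Auto Scaling group to zero capacity using the v2 client.
func drainToZero(ctx context.Context, conn *autoscaling.Client, name string) error {
	_, err := conn.UpdateAutoScalingGroup(ctx, &autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String(name),
		DesiredCapacity:      aws.Int32(0),
		MinSize:              aws.Int32(0),
		MaxSize:              aws.Int32(0),
	})
	return err
}

func main() {
	ctx := context.Background()

	// Assumes default credentials/region resolution, as in any standalone v2 example.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	if err := drainToZero(ctx, autoscaling.NewFromConfig(cfg), "example-asg"); err != nil {
		log.Fatal(err)
	}
}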
@@ -1886,7 +1890,7 @@ func drainGroup(ctx context.Context, conn *autoscaling.AutoScaling, name string, return nil } -func deleteWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name string, force bool, timeout time.Duration) error { +func deleteWarmPool(ctx context.Context, conn *autoscaling.Client, name string, force bool, timeout time.Duration) error { if !force { if err := drainWarmPool(ctx, conn, name, timeout); err != nil { return err @@ -1896,12 +1900,12 @@ func deleteWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name str log.Printf("[DEBUG] Deleting Auto Scaling Warm Pool: %s", name) _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.DeleteWarmPoolWithContext(ctx, &autoscaling.DeleteWarmPoolInput{ + return conn.DeleteWarmPool(ctx, &autoscaling.DeleteWarmPoolInput{ AutoScalingGroupName: aws.String(name), ForceDelete: aws.Bool(force), }) }, - autoscaling.ErrCodeResourceInUseFault, autoscaling.ErrCodeScalingActivityInProgressFault) + errCodeResourceInUseFault, errCodeScalingActivityInProgressFault) if tfawserr.ErrMessageContains(err, errCodeValidationError, "No warm pool found") { return nil @@ -1918,15 +1922,15 @@ func deleteWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name str return nil } -func drainWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) error { +func drainWarmPool(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) error { input := &autoscaling.PutWarmPoolInput{ AutoScalingGroupName: aws.String(name), - MaxGroupPreparedCapacity: aws.Int64(0), - MinSize: aws.Int64(0), + MaxGroupPreparedCapacity: aws.Int32(0), + MinSize: aws.Int32(0), } log.Printf("[DEBUG] Draining Auto Scaling Warm Pool: %s", name) - if _, err := conn.PutWarmPoolWithContext(ctx, input); err != nil { + if _, err := conn.PutWarmPool(ctx, input); err != nil { return fmt.Errorf("setting Auto Scaling Warm Pool (%s) capacity to 0: %w", name, err) } @@ -1937,11 +1941,11 @@ func drainWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name stri return nil } -func findELBInstanceStates(ctx context.Context, conn *elb.ELB, g *autoscaling.Group) (map[string]map[string]string, error) { +func findELBInstanceStates(ctx context.Context, conn *elb.ELB, g *awstypes.AutoScalingGroup) (map[string]map[string]string, error) { instanceStates := make(map[string]map[string]string) for _, v := range g.LoadBalancerNames { - lbName := aws.StringValue(v) + lbName := v input := &elb.DescribeInstanceHealthInput{ LoadBalancerName: aws.String(lbName), } @@ -1955,11 +1959,11 @@ func findELBInstanceStates(ctx context.Context, conn *elb.ELB, g *autoscaling.Gr instanceStates[lbName] = make(map[string]string) for _, v := range output.InstanceStates { - instanceID := aws.StringValue(v.InstanceId) + instanceID := aws.ToString(v.InstanceId) if instanceID == "" { continue } - state := aws.StringValue(v.State) + state := aws.ToString(v.State) if state == "" { continue } @@ -1971,11 +1975,11 @@ func findELBInstanceStates(ctx context.Context, conn *elb.ELB, g *autoscaling.Gr return instanceStates, nil } -func findELBV2InstanceStates(ctx context.Context, conn *elbv2.ELBV2, g *autoscaling.Group) (map[string]map[string]string, error) { +func findELBV2InstanceStates(ctx context.Context, conn *elbv2.ELBV2, g *awstypes.AutoScalingGroup) (map[string]map[string]string, error) { instanceStates := make(map[string]map[string]string) for _, v := range g.TargetGroupARNs { - 
targetGroupARN := aws.StringValue(v) + targetGroupARN := v input := &elbv2.DescribeTargetHealthInput{ TargetGroupArn: aws.String(targetGroupARN), } @@ -1993,11 +1997,11 @@ func findELBV2InstanceStates(ctx context.Context, conn *elbv2.ELBV2, g *autoscal continue } - instanceID := aws.StringValue(v.Target.Id) + instanceID := aws.ToString(v.Target.Id) if instanceID == "" { continue } - state := aws.StringValue(v.TargetHealth.State) + state := aws.ToString(v.TargetHealth.State) if state == "" { continue } @@ -2009,53 +2013,36 @@ func findELBV2InstanceStates(ctx context.Context, conn *elbv2.ELBV2, g *autoscal return instanceStates, nil } -func findGroup(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.Group, error) { +func findGroup(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeAutoScalingGroupsInput) (*awstypes.AutoScalingGroup, error) { output, err := findGroups(ctx, conn, input) if err != nil { return nil, err } - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output[0], nil + return tfresource.AssertSingleValueResult(output) } -func findGroups(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeAutoScalingGroupsInput) ([]*autoscaling.Group, error) { - var output []*autoscaling.Group +func findGroups(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeAutoScalingGroupsInput) ([]awstypes.AutoScalingGroup, error) { + var output []awstypes.AutoScalingGroup - err := conn.DescribeAutoScalingGroupsPagesWithContext(ctx, input, func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeAutoScalingGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.AutoScalingGroups { - if v == nil { - continue - } - - output = append(output, v) + if err != nil { + return nil, err } - return !lastPage - }) - - if err != nil { - return nil, err + output = append(output, page.AutoScalingGroups...) } return output, nil } -func FindGroupByName(ctx context.Context, conn *autoscaling.AutoScaling, name string) (*autoscaling.Group, error) { +func findGroupByName(ctx context.Context, conn *autoscaling.Client, name string) (*awstypes.AutoScalingGroup, error) { input := &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: aws.StringSlice([]string{name}), + AutoScalingGroupNames: []string{name}, } output, err := findGroup(ctx, conn, input) @@ -2065,7 +2052,7 @@ func FindGroupByName(ctx context.Context, conn *autoscaling.AutoScaling, name st } // Eventual consistency check. 
- if aws.StringValue(output.AutoScalingGroupName) != name { + if aws.ToString(output.AutoScalingGroupName) != name { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -2074,165 +2061,123 @@ func FindGroupByName(ctx context.Context, conn *autoscaling.AutoScaling, name st return output, nil } -func findInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeInstanceRefreshesInput) (*autoscaling.InstanceRefresh, error) { - output, err := FindInstanceRefreshes(ctx, conn, input) +func findInstanceRefresh(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeInstanceRefreshesInput) (*awstypes.InstanceRefresh, error) { + output, err := findInstanceRefreshes(ctx, conn, input) if err != nil { return nil, err } - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output[0], nil + return tfresource.AssertSingleValueResult(output) } -func FindInstanceRefreshes(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeInstanceRefreshesInput) ([]*autoscaling.InstanceRefresh, error) { - var output []*autoscaling.InstanceRefresh +func findInstanceRefreshes(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeInstanceRefreshesInput) ([]awstypes.InstanceRefresh, error) { + var output []awstypes.InstanceRefresh - err := describeInstanceRefreshesPages(ctx, conn, input, func(page *autoscaling.DescribeInstanceRefreshesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeInstanceRefreshesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.InstanceRefreshes { - if v == nil { - continue + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - - output = append(output, v) } - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.InstanceRefreshes...) 
} return output, nil } -func findLoadBalancerStates(ctx context.Context, conn *autoscaling.AutoScaling, name string) ([]*autoscaling.LoadBalancerState, error) { +func findLoadBalancerStates(ctx context.Context, conn *autoscaling.Client, name string) ([]awstypes.LoadBalancerState, error) { input := &autoscaling.DescribeLoadBalancersInput{ AutoScalingGroupName: aws.String(name), } - var output []*autoscaling.LoadBalancerState + var output []awstypes.LoadBalancerState - err := describeLoadBalancersPages(ctx, conn, input, func(page *autoscaling.DescribeLoadBalancersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeLoadBalancersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.LoadBalancers { - if v == nil { - continue + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - - output = append(output, v) } - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.LoadBalancers...) } return output, nil } -func findLoadBalancerTargetGroupStates(ctx context.Context, conn *autoscaling.AutoScaling, name string) ([]*autoscaling.LoadBalancerTargetGroupState, error) { +func findLoadBalancerTargetGroupStates(ctx context.Context, conn *autoscaling.Client, name string) ([]awstypes.LoadBalancerTargetGroupState, error) { input := &autoscaling.DescribeLoadBalancerTargetGroupsInput{ AutoScalingGroupName: aws.String(name), } - var output []*autoscaling.LoadBalancerTargetGroupState + var output []awstypes.LoadBalancerTargetGroupState - err := describeLoadBalancerTargetGroupsPages(ctx, conn, input, func(page *autoscaling.DescribeLoadBalancerTargetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeLoadBalancerTargetGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.LoadBalancerTargetGroups { - if v == nil { - continue + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - - output = append(output, v) } - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.LoadBalancerTargetGroups...) 
} return output, nil } -func findScalingActivities(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeScalingActivitiesInput, startTime time.Time) ([]*autoscaling.Activity, error) { - var output []*autoscaling.Activity +func findScalingActivities(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeScalingActivitiesInput, startTime time.Time) ([]awstypes.Activity, error) { + var output []awstypes.Activity - err := conn.DescribeScalingActivitiesPagesWithContext(ctx, input, func(page *autoscaling.DescribeScalingActivitiesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeScalingActivitiesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, activity := range page.Activities { - if activity == nil { - continue - } - - if startTime.Before(aws.TimeValue(activity.StartTime)) { - output = append(output, activity) + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, activity := range page.Activities { + if startTime.Before(aws.ToTime(activity.StartTime)) { + output = append(output, activity) + } + } } return output, nil } -func findScalingActivitiesByName(ctx context.Context, conn *autoscaling.AutoScaling, name string, startTime time.Time) ([]*autoscaling.Activity, error) { +func findScalingActivitiesByName(ctx context.Context, conn *autoscaling.Client, name string, startTime time.Time) ([]awstypes.Activity, error) { input := &autoscaling.DescribeScalingActivitiesInput{ AutoScalingGroupName: aws.String(name), } @@ -2240,40 +2185,31 @@ func findScalingActivitiesByName(ctx context.Context, conn *autoscaling.AutoScal return findScalingActivities(ctx, conn, input, startTime) } -func findTrafficSourceStates(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeTrafficSourcesInput) ([]*autoscaling.TrafficSourceState, error) { - var output []*autoscaling.TrafficSourceState +func findTrafficSourceStates(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeTrafficSourcesInput) ([]awstypes.TrafficSourceState, error) { + var output []awstypes.TrafficSourceState - err := conn.DescribeTrafficSourcesPagesWithContext(ctx, input, func(page *autoscaling.DescribeTrafficSourcesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeTrafficSourcesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.TrafficSources { - if v == nil { - continue + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - - output = append(output, v) } - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.TrafficSources...) 
} return output, nil } -func findTrafficSourceStatesByTwoPartKey(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType string) ([]*autoscaling.TrafficSourceState, error) { +func findTrafficSourceStatesByTwoPartKey(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType string) ([]awstypes.TrafficSourceState, error) { input := &autoscaling.DescribeTrafficSourcesInput{ AutoScalingGroupName: aws.String(asgName), } @@ -2284,15 +2220,22 @@ func findTrafficSourceStatesByTwoPartKey(ctx context.Context, conn *autoscaling. return findTrafficSourceStates(ctx, conn, input) } -func findWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name string) (*autoscaling.DescribeWarmPoolOutput, error) { - input := &autoscaling.DescribeWarmPoolInput{ - AutoScalingGroupName: aws.String(name), - } +func findWarmPool(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeWarmPoolInput) (*autoscaling.DescribeWarmPoolOutput, error) { var output *autoscaling.DescribeWarmPoolOutput - err := describeWarmPoolPages(ctx, conn, input, func(page *autoscaling.DescribeWarmPoolOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := autoscaling.NewDescribeWarmPoolPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } if output == nil { @@ -2300,29 +2243,24 @@ func findWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name strin } else { output.Instances = append(output.Instances, page.Instances...) } - - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err } if output == nil || output.WarmPoolConfiguration == nil { - return nil, tfresource.NewEmptyResultError(name) + return nil, tfresource.NewEmptyResultError(input) } return output, nil } -func statusGroupCapacity(ctx context.Context, conn *autoscaling.AutoScaling, elbconn *elb.ELB, elbv2conn *elbv2.ELBV2, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool) retry.StateRefreshFunc { +func findWarmPoolByName(ctx context.Context, conn *autoscaling.Client, name string) (*autoscaling.DescribeWarmPoolOutput, error) { + input := &autoscaling.DescribeWarmPoolInput{ + AutoScalingGroupName: aws.String(name), + } + + return findWarmPool(ctx, conn, input) +} + +func statusGroupCapacity(ctx context.Context, conn *autoscaling.Client, elbconn *elb.ELB, elbv2conn *elbv2.ELBV2, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool) retry.StateRefreshFunc { return func() (interface{}, string, error) { if !ignoreFailedScalingActivities { // Check for fatal error in activity logs. 
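// Reference sketch (illustration only, not part of the patch): the paginator pattern
// that the find* hunks above converge on. listGroups is a name invented for the example;
// the only assumption is a configured *autoscaling.Client. The v1
// Describe*PagesWithContext callbacks, with their nil-page and nil-element guards,
// become an explicit paginator loop in which every NextPage call returns an error that
// is handled inside the loop, and pages carry value slices that can be appended directly.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling/types"
)

func listGroups(ctx context.Context, conn *autoscaling.Client) ([]types.AutoScalingGroup, error) {
	var output []types.AutoScalingGroup

	pages := autoscaling.NewDescribeAutoScalingGroupsPaginator(conn, &autoscaling.DescribeAutoScalingGroupsInput{})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			return nil, err
		}

		output = append(output, page.AutoScalingGroups...)
	}

	return output, nil
}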
@@ -2335,12 +2273,12 @@ func statusGroupCapacity(ctx context.Context, conn *autoscaling.AutoScaling, elb var errs []error for _, v := range scalingActivities { - if statusCode := aws.StringValue(v.StatusCode); statusCode == autoscaling.ScalingActivityStatusCodeFailed && aws.Int64Value(v.Progress) == 100 { - if strings.Contains(aws.StringValue(v.StatusMessage), "Invalid IAM Instance Profile") { + if v.StatusCode == awstypes.ScalingActivityStatusCodeFailed && aws.ToInt32(v.Progress) == 100 { + if strings.Contains(aws.ToString(v.StatusMessage), "Invalid IAM Instance Profile") { // the activity will likely be retried continue } - errs = append(errs, fmt.Errorf("scaling activity (%s): %s: %s", aws.StringValue(v.ActivityId), statusCode, aws.StringValue(v.StatusMessage))) + errs = append(errs, fmt.Errorf("scaling activity (%s): %s: %s", aws.ToString(v.ActivityId), v.StatusCode, aws.ToString(v.StatusMessage))) } } @@ -2351,7 +2289,7 @@ func statusGroupCapacity(ctx context.Context, conn *autoscaling.AutoScaling, elb } } - g, err := FindGroupByName(ctx, conn, name) + g, err := findGroupByName(ctx, conn, name) if err != nil { return nil, "", fmt.Errorf("reading Auto Scaling Group (%s): %w", name, err) @@ -2373,21 +2311,21 @@ func statusGroupCapacity(ctx context.Context, conn *autoscaling.AutoScaling, elb nELB := 0 for _, v := range g.Instances { - instanceID := aws.StringValue(v.InstanceId) + instanceID := aws.ToString(v.InstanceId) if instanceID == "" { continue } - if aws.StringValue(v.HealthStatus) != InstanceHealthStatusHealthy { + if aws.ToString(v.HealthStatus) != InstanceHealthStatusHealthy { continue } - if aws.StringValue(v.LifecycleState) != autoscaling.LifecycleStateInService { + if v.LifecycleState != awstypes.LifecycleStateInService { continue } increment := 1 - if v := aws.StringValue(v.WeightedCapacity); v != "" { + if v := aws.ToString(v.WeightedCapacity); v != "" { v, _ := strconv.Atoi(v) increment = v } @@ -2427,9 +2365,9 @@ func statusGroupCapacity(ctx context.Context, conn *autoscaling.AutoScaling, elb } } -func statusGroupInstanceCount(ctx context.Context, conn *autoscaling.AutoScaling, name string) retry.StateRefreshFunc { +func statusGroupInstanceCount(ctx context.Context, conn *autoscaling.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindGroupByName(ctx, conn, name) + output, err := findGroupByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", nil @@ -2443,11 +2381,11 @@ func statusGroupInstanceCount(ctx context.Context, conn *autoscaling.AutoScaling } } -func statusInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, name, id string) retry.StateRefreshFunc { +func statusInstanceRefresh(ctx context.Context, conn *autoscaling.Client, name, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { input := &autoscaling.DescribeInstanceRefreshesInput{ AutoScalingGroupName: aws.String(name), - InstanceRefreshIds: aws.StringSlice([]string{id}), + InstanceRefreshIds: []string{id}, } output, err := findInstanceRefresh(ctx, conn, input) @@ -2460,11 +2398,11 @@ func statusInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, n return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func statusLoadBalancerInStateCount(ctx context.Context, conn *autoscaling.AutoScaling, name string, states ...string) retry.StateRefreshFunc { +func statusLoadBalancerInStateCount(ctx context.Context, conn 
*autoscaling.Client, name string, states ...string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findLoadBalancerStates(ctx, conn, name) @@ -2480,7 +2418,7 @@ func statusLoadBalancerInStateCount(ctx context.Context, conn *autoscaling.AutoS for _, v := range output { for _, state := range states { - if aws.StringValue(v.State) == state { + if aws.ToString(v.State) == state { count++ break } @@ -2491,7 +2429,7 @@ func statusLoadBalancerInStateCount(ctx context.Context, conn *autoscaling.AutoS } } -func statusLoadBalancerTargetGroupInStateCount(ctx context.Context, conn *autoscaling.AutoScaling, name string, states ...string) retry.StateRefreshFunc { +func statusLoadBalancerTargetGroupInStateCount(ctx context.Context, conn *autoscaling.Client, name string, states ...string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findLoadBalancerTargetGroupStates(ctx, conn, name) @@ -2507,7 +2445,7 @@ func statusLoadBalancerTargetGroupInStateCount(ctx context.Context, conn *autosc for _, v := range output { for _, state := range states { - if aws.StringValue(v.State) == state { + if aws.ToString(v.State) == state { count++ break } @@ -2518,7 +2456,7 @@ func statusLoadBalancerTargetGroupInStateCount(ctx context.Context, conn *autosc } } -func statusTrafficSourcesInStateCount(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType string, states ...string) retry.StateRefreshFunc { +func statusTrafficSourcesInStateCount(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType string, states ...string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findTrafficSourceStatesByTwoPartKey(ctx, conn, asgName, trafficSourceType) @@ -2534,7 +2472,7 @@ func statusTrafficSourcesInStateCount(ctx context.Context, conn *autoscaling.Aut for _, v := range output { for _, state := range states { - if aws.StringValue(v.State) == state { + if aws.ToString(v.State) == state { count++ break } @@ -2545,9 +2483,9 @@ func statusTrafficSourcesInStateCount(ctx context.Context, conn *autoscaling.Aut } } -func statusWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name string) retry.StateRefreshFunc { +func statusWarmPool(ctx context.Context, conn *autoscaling.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findWarmPool(ctx, conn, name) + output, err := findWarmPoolByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", nil @@ -2557,13 +2495,13 @@ func statusWarmPool(ctx context.Context, conn *autoscaling.AutoScaling, name str return nil, "", err } - return output.WarmPoolConfiguration, aws.StringValue(output.WarmPoolConfiguration.Status), nil + return output.WarmPoolConfiguration, string(output.WarmPoolConfiguration.Status), nil } } -func statusWarmPoolInstanceCount(ctx context.Context, conn *autoscaling.AutoScaling, name string) retry.StateRefreshFunc { +func statusWarmPoolInstanceCount(ctx context.Context, conn *autoscaling.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findWarmPool(ctx, conn, name) + output, err := findWarmPoolByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", nil @@ -2577,7 +2515,7 @@ func statusWarmPoolInstanceCount(ctx context.Context, conn *autoscaling.AutoScal } } -func waitGroupCapacitySatisfied(ctx context.Context, conn *autoscaling.AutoScaling, elbconn *elb.ELB, elbv2conn *elbv2.ELBV2, 
name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool, timeout time.Duration) error { +func waitGroupCapacitySatisfied(ctx context.Context, conn *autoscaling.Client, elbconn *elb.ELB, elbv2conn *elbv2.ELBV2, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ Target: []string{"ok"}, Refresh: statusGroupCapacity(ctx, conn, elbconn, elbv2conn, name, cb, startTime, ignoreFailedScalingActivities), @@ -2593,7 +2531,7 @@ func waitGroupCapacitySatisfied(ctx context.Context, conn *autoscaling.AutoScali return err } -func waitGroupDrained(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) (*autoscaling.Group, error) { +func waitGroupDrained(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) (*awstypes.AutoScalingGroup, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusGroupInstanceCount(ctx, conn, name), @@ -2602,14 +2540,14 @@ func waitGroupDrained(ctx context.Context, conn *autoscaling.AutoScaling, name s outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*autoscaling.Group); ok { + if output, ok := outputRaw.(*awstypes.AutoScalingGroup); ok { return output, err } return nil, err } -func waitLoadBalancersAdded(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) ([]*autoscaling.LoadBalancerState, error) { +func waitLoadBalancersAdded(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) ([]*awstypes.LoadBalancerState, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusLoadBalancerInStateCount(ctx, conn, name, LoadBalancerStateAdding), @@ -2618,14 +2556,14 @@ func waitLoadBalancersAdded(ctx context.Context, conn *autoscaling.AutoScaling, outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*autoscaling.LoadBalancerState); ok { + if output, ok := outputRaw.([]*awstypes.LoadBalancerState); ok { return output, err } return nil, err } -func waitLoadBalancersRemoved(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) ([]*autoscaling.LoadBalancerState, error) { +func waitLoadBalancersRemoved(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) ([]*awstypes.LoadBalancerState, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusLoadBalancerInStateCount(ctx, conn, name, LoadBalancerStateRemoving), @@ -2634,14 +2572,14 @@ func waitLoadBalancersRemoved(ctx context.Context, conn *autoscaling.AutoScaling outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*autoscaling.LoadBalancerState); ok { + if output, ok := outputRaw.([]*awstypes.LoadBalancerState); ok { return output, err } return nil, err } -func waitLoadBalancerTargetGroupsAdded(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) ([]*autoscaling.LoadBalancerTargetGroupState, error) { +func waitLoadBalancerTargetGroupsAdded(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) ([]*awstypes.LoadBalancerTargetGroupState, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusLoadBalancerTargetGroupInStateCount(ctx, conn, name, LoadBalancerTargetGroupStateAdding), @@ -2650,14 +2588,14 @@ func waitLoadBalancerTargetGroupsAdded(ctx 
context.Context, conn *autoscaling.Au outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*autoscaling.LoadBalancerTargetGroupState); ok { + if output, ok := outputRaw.([]*awstypes.LoadBalancerTargetGroupState); ok { return output, err } return nil, err } -func waitLoadBalancerTargetGroupsRemoved(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) ([]*autoscaling.LoadBalancerTargetGroupState, error) { +func waitLoadBalancerTargetGroupsRemoved(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) ([]*awstypes.LoadBalancerTargetGroupState, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusLoadBalancerTargetGroupInStateCount(ctx, conn, name, LoadBalancerTargetGroupStateRemoving), @@ -2666,14 +2604,14 @@ func waitLoadBalancerTargetGroupsRemoved(ctx context.Context, conn *autoscaling. outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*autoscaling.LoadBalancerTargetGroupState); ok { + if output, ok := outputRaw.([]*awstypes.LoadBalancerTargetGroupState); ok { return output, err } return nil, err } -func waitTrafficSourcesCreated(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType string, timeout time.Duration) ([]*autoscaling.TrafficSourceState, error) { +func waitTrafficSourcesCreated(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType string, timeout time.Duration) ([]*awstypes.TrafficSourceState, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusTrafficSourcesInStateCount(ctx, conn, asgName, trafficSourceType, TrafficSourceStateAdding), @@ -2682,14 +2620,14 @@ func waitTrafficSourcesCreated(ctx context.Context, conn *autoscaling.AutoScalin outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*autoscaling.TrafficSourceState); ok { + if output, ok := outputRaw.([]*awstypes.TrafficSourceState); ok { return output, err } return nil, err } -func waitTrafficSourcesDeleted(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType string, timeout time.Duration) ([]*autoscaling.TrafficSourceState, error) { +func waitTrafficSourcesDeleted(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType string, timeout time.Duration) ([]*awstypes.TrafficSourceState, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusTrafficSourcesInStateCount(ctx, conn, asgName, trafficSourceType, TrafficSourceStateRemoving), @@ -2698,7 +2636,7 @@ func waitTrafficSourcesDeleted(ctx context.Context, conn *autoscaling.AutoScalin outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*autoscaling.TrafficSourceState); ok { + if output, ok := outputRaw.([]*awstypes.TrafficSourceState); ok { return output, err } @@ -2715,34 +2653,34 @@ const ( instanceRefreshCancelledTimeout = 15 * time.Minute ) -func waitInstanceRefreshCancelled(ctx context.Context, conn *autoscaling.AutoScaling, name, id string, timeout time.Duration) (*autoscaling.InstanceRefresh, error) { +func waitInstanceRefreshCancelled(ctx context.Context, conn *autoscaling.Client, name, id string, timeout time.Duration) (*awstypes.InstanceRefresh, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{ - autoscaling.InstanceRefreshStatusCancelling, - autoscaling.InstanceRefreshStatusInProgress, - autoscaling.InstanceRefreshStatusPending, - }, - Target: []string{ - 
autoscaling.InstanceRefreshStatusCancelled, - autoscaling.InstanceRefreshStatusFailed, - autoscaling.InstanceRefreshStatusSuccessful, - }, + Pending: enum.Slice( + awstypes.InstanceRefreshStatusCancelling, + awstypes.InstanceRefreshStatusInProgress, + awstypes.InstanceRefreshStatusPending, + ), + Target: enum.Slice( + awstypes.InstanceRefreshStatusCancelled, + awstypes.InstanceRefreshStatusFailed, + awstypes.InstanceRefreshStatusSuccessful, + ), Refresh: statusInstanceRefresh(ctx, conn, name, id), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*autoscaling.InstanceRefresh); ok { + if output, ok := outputRaw.(*awstypes.InstanceRefresh); ok { return output, err } return nil, err } -func waitWarmPoolDeleted(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) (*autoscaling.WarmPoolConfiguration, error) { +func waitWarmPoolDeleted(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) (*awstypes.WarmPoolConfiguration, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{autoscaling.WarmPoolStatusPendingDelete}, + Pending: enum.Slice(awstypes.WarmPoolStatusPendingDelete), Target: []string{}, Refresh: statusWarmPool(ctx, conn, name), Timeout: timeout, @@ -2750,14 +2688,14 @@ func waitWarmPoolDeleted(ctx context.Context, conn *autoscaling.AutoScaling, nam outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*autoscaling.WarmPoolConfiguration); ok { + if output, ok := outputRaw.(*awstypes.WarmPoolConfiguration); ok { return output, err } return nil, err } -func waitWarmPoolDrained(ctx context.Context, conn *autoscaling.AutoScaling, name string, timeout time.Duration) (*autoscaling.DescribeWarmPoolOutput, error) { +func waitWarmPoolDrained(ctx context.Context, conn *autoscaling.Client, name string, timeout time.Duration) (*autoscaling.DescribeWarmPoolOutput, error) { stateConf := &retry.StateChangeConf{ Target: []string{"0"}, Refresh: statusWarmPoolInstanceCount(ctx, conn, name), @@ -2773,23 +2711,23 @@ func waitWarmPoolDrained(ctx context.Context, conn *autoscaling.AutoScaling, nam return nil, err } -func expandInstancesDistribution(tfMap map[string]interface{}) *autoscaling.InstancesDistribution { +func expandInstancesDistribution(tfMap map[string]interface{}) *awstypes.InstancesDistribution { if tfMap == nil { return nil } - apiObject := &autoscaling.InstancesDistribution{} + apiObject := &awstypes.InstancesDistribution{} if v, ok := tfMap["on_demand_allocation_strategy"].(string); ok && v != "" { apiObject.OnDemandAllocationStrategy = aws.String(v) } if v, ok := tfMap["on_demand_base_capacity"].(int); ok { - apiObject.OnDemandBaseCapacity = aws.Int64(int64(v)) + apiObject.OnDemandBaseCapacity = aws.Int32(int32(v)) } if v, ok := tfMap["on_demand_percentage_above_base_capacity"].(int); ok { - apiObject.OnDemandPercentageAboveBaseCapacity = aws.Int64(int64(v)) + apiObject.OnDemandPercentageAboveBaseCapacity = aws.Int32(int32(v)) } if v, ok := tfMap["spot_allocation_strategy"].(string); ok && v != "" { @@ -2797,7 +2735,7 @@ func expandInstancesDistribution(tfMap map[string]interface{}) *autoscaling.Inst } if v, ok := tfMap["spot_instance_pools"].(int); ok && v != 0 { - apiObject.SpotInstancePools = aws.Int64(int64(v)) + apiObject.SpotInstancePools = aws.Int32(int32(v)) } if v, ok := tfMap["spot_max_price"].(string); ok { @@ -2807,12 +2745,12 @@ func expandInstancesDistribution(tfMap map[string]interface{}) *autoscaling.Inst return 
apiObject } -func expandLaunchTemplate(tfMap map[string]interface{}, hasDefaultVersion bool) *autoscaling.LaunchTemplate { +func expandLaunchTemplate(tfMap map[string]interface{}, hasDefaultVersion bool) *awstypes.LaunchTemplate { if tfMap == nil { return nil } - apiObject := &autoscaling.LaunchTemplate{} + apiObject := &awstypes.LaunchTemplate{} if v, ok := tfMap["launch_template_specification"].([]interface{}); ok && len(v) > 0 { apiObject.LaunchTemplateSpecification = expandLaunchTemplateSpecificationForMixedInstancesPolicy(v[0].(map[string]interface{}), hasDefaultVersion) @@ -2825,12 +2763,8 @@ func expandLaunchTemplate(tfMap map[string]interface{}, hasDefaultVersion bool) return apiObject } -func expandLaunchTemplateOverrides(tfMap map[string]interface{}, hasDefaultVersion bool) *autoscaling.LaunchTemplateOverrides { - if tfMap == nil { - return nil - } - - apiObject := &autoscaling.LaunchTemplateOverrides{} +func expandLaunchTemplateOverrides(tfMap map[string]interface{}, hasDefaultVersion bool) awstypes.LaunchTemplateOverrides { + apiObject := awstypes.LaunchTemplateOverrides{} if v, ok := tfMap["instance_requirements"].([]interface{}); ok && len(v) > 0 { apiObject.InstanceRequirements = expandInstanceRequirements(v[0].(map[string]interface{})) @@ -2851,12 +2785,12 @@ func expandLaunchTemplateOverrides(tfMap map[string]interface{}, hasDefaultVersi return apiObject } -func expandLaunchTemplateOverrideses(tfList []interface{}, hasDefaultVersion bool) []*autoscaling.LaunchTemplateOverrides { +func expandLaunchTemplateOverrideses(tfList []interface{}, hasDefaultVersion bool) []awstypes.LaunchTemplateOverrides { if len(tfList) == 0 { return nil } - var apiObjects []*autoscaling.LaunchTemplateOverrides + var apiObjects []awstypes.LaunchTemplateOverrides for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -2866,34 +2800,29 @@ func expandLaunchTemplateOverrideses(tfList []interface{}, hasDefaultVersion boo } apiObject := expandLaunchTemplateOverrides(tfMap, hasDefaultVersion) - - if apiObject == nil { - continue - } - apiObjects = append(apiObjects, apiObject) } return apiObjects } -func expandInstanceRequirements(tfMap map[string]interface{}) *autoscaling.InstanceRequirements { +func expandInstanceRequirements(tfMap map[string]interface{}) *awstypes.InstanceRequirements { if tfMap == nil { return nil } - apiObject := &autoscaling.InstanceRequirements{} + apiObject := &awstypes.InstanceRequirements{} if v, ok := tfMap["accelerator_count"].([]interface{}); ok && len(v) > 0 { apiObject.AcceleratorCount = expandAcceleratorCountRequest(v[0].(map[string]interface{})) } if v, ok := tfMap["accelerator_manufacturers"].(*schema.Set); ok && v.Len() > 0 { - apiObject.AcceleratorManufacturers = flex.ExpandStringSet(v) + apiObject.AcceleratorManufacturers = flex.ExpandStringyValueSet[awstypes.AcceleratorManufacturer](v) } if v, ok := tfMap["accelerator_names"].(*schema.Set); ok && v.Len() > 0 { - apiObject.AcceleratorNames = flex.ExpandStringSet(v) + apiObject.AcceleratorNames = flex.ExpandStringyValueSet[awstypes.AcceleratorName](v) } if v, ok := tfMap["accelerator_total_memory_mib"].([]interface{}); ok && len(v) > 0 { @@ -2901,15 +2830,15 @@ func expandInstanceRequirements(tfMap map[string]interface{}) *autoscaling.Insta } if v, ok := tfMap["accelerator_types"].(*schema.Set); ok && v.Len() > 0 { - apiObject.AcceleratorTypes = flex.ExpandStringSet(v) + apiObject.AcceleratorTypes = flex.ExpandStringyValueSet[awstypes.AcceleratorType](v) } if v, ok := 
tfMap["allowed_instance_types"].(*schema.Set); ok && v.Len() > 0 { - apiObject.AllowedInstanceTypes = flex.ExpandStringSet(v) + apiObject.AllowedInstanceTypes = flex.ExpandStringValueSet(v) } if v, ok := tfMap["bare_metal"].(string); ok && v != "" { - apiObject.BareMetal = aws.String(v) + apiObject.BareMetal = awstypes.BareMetal(v) } if v, ok := tfMap["baseline_ebs_bandwidth_mbps"].([]interface{}); ok && len(v) > 0 { @@ -2917,27 +2846,27 @@ func expandInstanceRequirements(tfMap map[string]interface{}) *autoscaling.Insta } if v, ok := tfMap["burstable_performance"].(string); ok && v != "" { - apiObject.BurstablePerformance = aws.String(v) + apiObject.BurstablePerformance = awstypes.BurstablePerformance(v) } if v, ok := tfMap["cpu_manufacturers"].(*schema.Set); ok && v.Len() > 0 { - apiObject.CpuManufacturers = flex.ExpandStringSet(v) + apiObject.CpuManufacturers = flex.ExpandStringyValueSet[awstypes.CpuManufacturer](v) } if v, ok := tfMap["excluded_instance_types"].(*schema.Set); ok && v.Len() > 0 { - apiObject.ExcludedInstanceTypes = flex.ExpandStringSet(v) + apiObject.ExcludedInstanceTypes = flex.ExpandStringValueSet(v) } if v, ok := tfMap["instance_generations"].(*schema.Set); ok && v.Len() > 0 { - apiObject.InstanceGenerations = flex.ExpandStringSet(v) + apiObject.InstanceGenerations = flex.ExpandStringyValueSet[awstypes.InstanceGeneration](v) } if v, ok := tfMap["local_storage"].(string); ok && v != "" { - apiObject.LocalStorage = aws.String(v) + apiObject.LocalStorage = awstypes.LocalStorage(v) } if v, ok := tfMap["local_storage_types"].(*schema.Set); ok && v.Len() > 0 { - apiObject.LocalStorageTypes = flex.ExpandStringSet(v) + apiObject.LocalStorageTypes = flex.ExpandStringyValueSet[awstypes.LocalStorageType](v) } if v, ok := tfMap["memory_gib_per_vcpu"].([]interface{}); ok && len(v) > 0 { @@ -2957,7 +2886,7 @@ func expandInstanceRequirements(tfMap map[string]interface{}) *autoscaling.Insta } if v, ok := tfMap["on_demand_max_price_percentage_over_lowest_price"].(int); ok && v != 0 { - apiObject.OnDemandMaxPricePercentageOverLowestPrice = aws.Int64(int64(v)) + apiObject.OnDemandMaxPricePercentageOverLowestPrice = aws.Int32(int32(v)) } if v, ok := tfMap["require_hibernate_support"].(bool); ok && v { @@ -2965,7 +2894,7 @@ func expandInstanceRequirements(tfMap map[string]interface{}) *autoscaling.Insta } if v, ok := tfMap["spot_max_price_percentage_over_lowest_price"].(int); ok && v != 0 { - apiObject.SpotMaxPricePercentageOverLowestPrice = aws.Int64(int64(v)) + apiObject.SpotMaxPricePercentageOverLowestPrice = aws.Int32(int32(v)) } if v, ok := tfMap["total_local_storage_gb"].([]interface{}); ok && len(v) > 0 { @@ -2979,72 +2908,72 @@ func expandInstanceRequirements(tfMap map[string]interface{}) *autoscaling.Insta return apiObject } -func expandAcceleratorCountRequest(tfMap map[string]interface{}) *autoscaling.AcceleratorCountRequest { +func expandAcceleratorCountRequest(tfMap map[string]interface{}) *awstypes.AcceleratorCountRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.AcceleratorCountRequest{} + apiObject := &awstypes.AcceleratorCountRequest{} var min int if v, ok := tfMap["min"].(int); ok { min = v - apiObject.Min = aws.Int64(int64(v)) + apiObject.Min = aws.Int32(int32(v)) } if v, ok := tfMap["max"].(int); ok && v >= min { - apiObject.Max = aws.Int64(int64(v)) + apiObject.Max = aws.Int32(int32(v)) } return apiObject } -func expandAcceleratorTotalMemoryMiBRequest(tfMap map[string]interface{}) *autoscaling.AcceleratorTotalMemoryMiBRequest { +func 
expandAcceleratorTotalMemoryMiBRequest(tfMap map[string]interface{}) *awstypes.AcceleratorTotalMemoryMiBRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.AcceleratorTotalMemoryMiBRequest{} + apiObject := &awstypes.AcceleratorTotalMemoryMiBRequest{} var min int if v, ok := tfMap["min"].(int); ok { min = v - apiObject.Min = aws.Int64(int64(v)) + apiObject.Min = aws.Int32(int32(v)) } if v, ok := tfMap["max"].(int); ok && v >= min { - apiObject.Max = aws.Int64(int64(v)) + apiObject.Max = aws.Int32(int32(v)) } return apiObject } -func expandBaselineEBSBandwidthMbpsRequest(tfMap map[string]interface{}) *autoscaling.BaselineEbsBandwidthMbpsRequest { +func expandBaselineEBSBandwidthMbpsRequest(tfMap map[string]interface{}) *awstypes.BaselineEbsBandwidthMbpsRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.BaselineEbsBandwidthMbpsRequest{} + apiObject := &awstypes.BaselineEbsBandwidthMbpsRequest{} var min int if v, ok := tfMap["min"].(int); ok { min = v - apiObject.Min = aws.Int64(int64(v)) + apiObject.Min = aws.Int32(int32(v)) } if v, ok := tfMap["max"].(int); ok && v >= min { - apiObject.Max = aws.Int64(int64(v)) + apiObject.Max = aws.Int32(int32(v)) } return apiObject } -func expandMemoryGiBPerVCPURequest(tfMap map[string]interface{}) *autoscaling.MemoryGiBPerVCpuRequest { +func expandMemoryGiBPerVCPURequest(tfMap map[string]interface{}) *awstypes.MemoryGiBPerVCpuRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.MemoryGiBPerVCpuRequest{} + apiObject := &awstypes.MemoryGiBPerVCpuRequest{} var min float64 if v, ok := tfMap["min"].(float64); ok { @@ -3059,32 +2988,32 @@ func expandMemoryGiBPerVCPURequest(tfMap map[string]interface{}) *autoscaling.Me return apiObject } -func expandMemoryMiBRequest(tfMap map[string]interface{}) *autoscaling.MemoryMiBRequest { +func expandMemoryMiBRequest(tfMap map[string]interface{}) *awstypes.MemoryMiBRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.MemoryMiBRequest{} + apiObject := &awstypes.MemoryMiBRequest{} var min int if v, ok := tfMap["min"].(int); ok { min = v - apiObject.Min = aws.Int64(int64(v)) + apiObject.Min = aws.Int32(int32(v)) } if v, ok := tfMap["max"].(int); ok && v >= min { - apiObject.Max = aws.Int64(int64(v)) + apiObject.Max = aws.Int32(int32(v)) } return apiObject } -func expandNetworkBandwidthGbpsRequest(tfMap map[string]interface{}) *autoscaling.NetworkBandwidthGbpsRequest { +func expandNetworkBandwidthGbpsRequest(tfMap map[string]interface{}) *awstypes.NetworkBandwidthGbpsRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.NetworkBandwidthGbpsRequest{} + apiObject := &awstypes.NetworkBandwidthGbpsRequest{} var min float64 if v, ok := tfMap["min"].(float64); ok { @@ -3099,32 +3028,32 @@ func expandNetworkBandwidthGbpsRequest(tfMap map[string]interface{}) *autoscalin return apiObject } -func expandNetworkInterfaceCountRequest(tfMap map[string]interface{}) *autoscaling.NetworkInterfaceCountRequest { +func expandNetworkInterfaceCountRequest(tfMap map[string]interface{}) *awstypes.NetworkInterfaceCountRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.NetworkInterfaceCountRequest{} + apiObject := &awstypes.NetworkInterfaceCountRequest{} var min int if v, ok := tfMap["min"].(int); ok { min = v - apiObject.Min = aws.Int64(int64(v)) + apiObject.Min = aws.Int32(int32(v)) } if v, ok := tfMap["max"].(int); ok && v >= min { - apiObject.Max = aws.Int64(int64(v)) + apiObject.Max = aws.Int32(int32(v)) } return apiObject } -func 
expandTotalLocalStorageGBRequest(tfMap map[string]interface{}) *autoscaling.TotalLocalStorageGBRequest { +func expandTotalLocalStorageGBRequest(tfMap map[string]interface{}) *awstypes.TotalLocalStorageGBRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.TotalLocalStorageGBRequest{} + apiObject := &awstypes.TotalLocalStorageGBRequest{} var min float64 if v, ok := tfMap["min"].(float64); ok { @@ -3139,32 +3068,32 @@ func expandTotalLocalStorageGBRequest(tfMap map[string]interface{}) *autoscaling return apiObject } -func expandVCPUCountRequest(tfMap map[string]interface{}) *autoscaling.VCpuCountRequest { +func expandVCPUCountRequest(tfMap map[string]interface{}) *awstypes.VCpuCountRequest { if tfMap == nil { return nil } - apiObject := &autoscaling.VCpuCountRequest{} + apiObject := &awstypes.VCpuCountRequest{} min := 0 if v, ok := tfMap["min"].(int); ok { min = v - apiObject.Min = aws.Int64(int64(v)) + apiObject.Min = aws.Int32(int32(v)) } if v, ok := tfMap["max"].(int); ok && v >= min { - apiObject.Max = aws.Int64(int64(v)) + apiObject.Max = aws.Int32(int32(v)) } return apiObject } -func expandLaunchTemplateSpecificationForMixedInstancesPolicy(tfMap map[string]interface{}, hasDefaultVersion bool) *autoscaling.LaunchTemplateSpecification { +func expandLaunchTemplateSpecificationForMixedInstancesPolicy(tfMap map[string]interface{}, hasDefaultVersion bool) *awstypes.LaunchTemplateSpecification { if tfMap == nil { return nil } - apiObject := &autoscaling.LaunchTemplateSpecification{} + apiObject := &awstypes.LaunchTemplateSpecification{} // API returns both ID and name, which Terraform saves to state. Next update returns: // ValidationError: Valid requests must contain either launchTemplateId or LaunchTemplateName @@ -3186,12 +3115,12 @@ func expandLaunchTemplateSpecificationForMixedInstancesPolicy(tfMap map[string]i return apiObject } -func expandLaunchTemplateSpecification(tfMap map[string]interface{}, hasDefaultVersion bool) *autoscaling.LaunchTemplateSpecification { +func expandLaunchTemplateSpecification(tfMap map[string]interface{}, hasDefaultVersion bool) *awstypes.LaunchTemplateSpecification { if tfMap == nil { return nil } - apiObject := &autoscaling.LaunchTemplateSpecification{} + apiObject := &awstypes.LaunchTemplateSpecification{} // DescribeAutoScalingGroups returns both name and id but LaunchTemplateSpecification // allows only one of them to be set. 
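// Reference sketch (illustration only, not part of the patch): the expander shape the
// hunks above settle on once the structs come from the awstypes package.
// expandExampleRequirements is an invented name and the two attributes shown are only
// representative. It captures the two recurring changes in the expand* functions:
// string-backed enums are converted directly, e.g. types.BareMetal(v), instead of being
// wrapped with aws.String, and integer min/max blocks become *int32 fields via aws.Int32.
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling/types"
)

func expandExampleRequirements(tfMap map[string]interface{}) *types.InstanceRequirements {
	if tfMap == nil {
		return nil
	}

	apiObject := &types.InstanceRequirements{}

	// Enum-typed field: direct conversion from the configuration string.
	if v, ok := tfMap["bare_metal"].(string); ok && v != "" {
		apiObject.BareMetal = types.BareMetal(v)
	}

	// Numeric min/max block: pointer int32 fields built with aws.Int32.
	if v, ok := tfMap["accelerator_count"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
		m := v[0].(map[string]interface{})
		req := &types.AcceleratorCountRequest{}

		if vMin, ok := m["min"].(int); ok {
			req.Min = aws.Int32(int32(vMin))
		}
		if vMax, ok := m["max"].(int); ok {
			req.Max = aws.Int32(int32(vMax))
		}

		apiObject.AcceleratorCount = req
	}

	return apiObject
}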
@@ -3212,12 +3141,12 @@ func expandLaunchTemplateSpecification(tfMap map[string]interface{}, hasDefaultV return apiObject } -func expandMixedInstancesPolicy(tfMap map[string]interface{}, hasDefaultVersion bool) *autoscaling.MixedInstancesPolicy { +func expandMixedInstancesPolicy(tfMap map[string]interface{}, hasDefaultVersion bool) *awstypes.MixedInstancesPolicy { if tfMap == nil { return nil } - apiObject := &autoscaling.MixedInstancesPolicy{} + apiObject := &awstypes.MixedInstancesPolicy{} if v, ok := tfMap["instances_distribution"].([]interface{}); ok && len(v) > 0 { apiObject.InstancesDistribution = expandInstancesDistribution(v[0].(map[string]interface{})) @@ -3244,7 +3173,7 @@ func expandPutLifecycleHookInput(name string, tfMap map[string]interface{}) *aut } if v, ok := tfMap["heartbeat_timeout"].(int); ok && v != 0 { - apiObject.HeartbeatTimeout = aws.Int64(int64(v)) + apiObject.HeartbeatTimeout = aws.Int32(int32(v)) } if v, ok := tfMap["name"].(string); ok && v != "" { @@ -3310,26 +3239,26 @@ func expandPutWarmPoolInput(name string, tfMap map[string]interface{}) *autoscal } if v, ok := tfMap["max_group_prepared_capacity"].(int); ok && v != 0 { - apiObject.MaxGroupPreparedCapacity = aws.Int64(int64(v)) + apiObject.MaxGroupPreparedCapacity = aws.Int32(int32(v)) } if v, ok := tfMap["min_size"].(int); ok && v != 0 { - apiObject.MinSize = aws.Int64(int64(v)) + apiObject.MinSize = aws.Int32(int32(v)) } if v, ok := tfMap["pool_state"].(string); ok && v != "" { - apiObject.PoolState = aws.String(v) + apiObject.PoolState = awstypes.WarmPoolState(v) } return apiObject } -func expandInstanceReusePolicy(tfMap map[string]interface{}) *autoscaling.InstanceReusePolicy { +func expandInstanceReusePolicy(tfMap map[string]interface{}) *awstypes.InstanceReusePolicy { if tfMap == nil { return nil } - apiObject := &autoscaling.InstanceReusePolicy{} + apiObject := &awstypes.InstanceReusePolicy{} if v, ok := tfMap["reuse_on_scale_in"].(bool); ok { apiObject.ReuseOnScaleIn = aws.Bool(v) @@ -3338,7 +3267,7 @@ func expandInstanceReusePolicy(tfMap map[string]interface{}) *autoscaling.Instan return apiObject } -func expandStartInstanceRefreshInput(name string, tfMap map[string]interface{}, launchTemplate *autoscaling.LaunchTemplateSpecification, mixedInstancesPolicy *autoscaling.MixedInstancesPolicy) *autoscaling.StartInstanceRefreshInput { +func expandStartInstanceRefreshInput(name string, tfMap map[string]interface{}, launchTemplate *awstypes.LaunchTemplateSpecification, mixedInstancesPolicy *awstypes.MixedInstancesPolicy) *autoscaling.StartInstanceRefreshInput { if tfMap == nil { return nil } @@ -3351,8 +3280,8 @@ func expandStartInstanceRefreshInput(name string, tfMap map[string]interface{}, apiObject.Preferences = expandRefreshPreferences(v[0].(map[string]interface{})) // "The AutoRollback parameter cannot be set to true when the DesiredConfiguration parameter is empty". 
- if aws.BoolValue(apiObject.Preferences.AutoRollback) { - apiObject.DesiredConfiguration = &autoscaling.DesiredConfiguration{ + if aws.ToBool(apiObject.Preferences.AutoRollback) { + apiObject.DesiredConfiguration = &awstypes.DesiredConfiguration{ LaunchTemplate: launchTemplate, MixedInstancesPolicy: mixedInstancesPolicy, } @@ -3360,18 +3289,18 @@ func expandStartInstanceRefreshInput(name string, tfMap map[string]interface{}, } if v, ok := tfMap["strategy"].(string); ok && v != "" { - apiObject.Strategy = aws.String(v) + apiObject.Strategy = awstypes.RefreshStrategy(v) } return apiObject } -func expandRefreshPreferences(tfMap map[string]interface{}) *autoscaling.RefreshPreferences { +func expandRefreshPreferences(tfMap map[string]interface{}) *awstypes.RefreshPreferences { if tfMap == nil { return nil } - apiObject := &autoscaling.RefreshPreferences{} + apiObject := &awstypes.RefreshPreferences{} if v, ok := tfMap["auto_rollback"].(bool); ok { apiObject.AutoRollback = aws.Bool(v) @@ -3379,30 +3308,30 @@ func expandRefreshPreferences(tfMap map[string]interface{}) *autoscaling.Refresh if v, ok := tfMap["checkpoint_delay"].(string); ok { if v, null, _ := nullable.Int(v).Value(); !null { - apiObject.CheckpointDelay = aws.Int64(v) + apiObject.CheckpointDelay = aws.Int32(int32(v)) } } if v, ok := tfMap["checkpoint_percentages"].([]interface{}); ok && len(v) > 0 { - apiObject.CheckpointPercentages = flex.ExpandInt64List(v) + apiObject.CheckpointPercentages = flex.ExpandInt32ValueList(v) } if v, ok := tfMap["instance_warmup"].(string); ok { if v, null, _ := nullable.Int(v).Value(); !null { - apiObject.InstanceWarmup = aws.Int64(v) + apiObject.InstanceWarmup = aws.Int32(int32(v)) } } if v, ok := tfMap["max_healthy_percentage"].(int); ok { - apiObject.MaxHealthyPercentage = aws.Int64(int64(v)) + apiObject.MaxHealthyPercentage = aws.Int32(int32(v)) } if v, ok := tfMap["min_healthy_percentage"].(int); ok { - apiObject.MinHealthyPercentage = aws.Int64(int64(v)) + apiObject.MinHealthyPercentage = aws.Int32(int32(v)) } if v, ok := tfMap["scale_in_protected_instances"].(string); ok { - apiObject.ScaleInProtectedInstances = aws.String(v) + apiObject.ScaleInProtectedInstances = awstypes.ScaleInProtectedInstances(v) } if v, ok := tfMap["skip_matching"].(bool); ok { @@ -3410,7 +3339,7 @@ func expandRefreshPreferences(tfMap map[string]interface{}) *autoscaling.Refresh } if v, ok := tfMap["standby_instances"].(string); ok { - apiObject.StandbyInstances = aws.String(v) + apiObject.StandbyInstances = awstypes.StandbyInstances(v) } return apiObject @@ -3426,12 +3355,8 @@ func expandVPCZoneIdentifiers(tfList []interface{}) *string { return aws.String(strings.Join(vpcZoneIDs, ",")) } -func expandTrafficSourceIdentifier(tfMap map[string]interface{}) *autoscaling.TrafficSourceIdentifier { - if tfMap == nil { - return nil - } - - apiObject := &autoscaling.TrafficSourceIdentifier{} +func expandTrafficSourceIdentifier(tfMap map[string]interface{}) awstypes.TrafficSourceIdentifier { + apiObject := awstypes.TrafficSourceIdentifier{} if v, ok := tfMap["identifier"].(string); ok && v != "" { apiObject.Identifier = aws.String(v) @@ -3444,12 +3369,12 @@ func expandTrafficSourceIdentifier(tfMap map[string]interface{}) *autoscaling.Tr return apiObject } -func expandTrafficSourceIdentifiers(tfList []interface{}) []*autoscaling.TrafficSourceIdentifier { +func expandTrafficSourceIdentifiers(tfList []interface{}) []awstypes.TrafficSourceIdentifier { if len(tfList) == 0 { return nil } - var apiObjects 
[]*autoscaling.TrafficSourceIdentifier + var apiObjects []awstypes.TrafficSourceIdentifier for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -3459,34 +3384,25 @@ func expandTrafficSourceIdentifiers(tfList []interface{}) []*autoscaling.Traffic } apiObject := expandTrafficSourceIdentifier(tfMap) - - if apiObject == nil { - continue - } - apiObjects = append(apiObjects, apiObject) } return apiObjects } -func flattenEnabledMetrics(apiObjects []*autoscaling.EnabledMetric) []string { +func flattenEnabledMetrics(apiObjects []awstypes.EnabledMetric) []string { var tfList []string for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - if v := apiObject.Metric; v != nil { - tfList = append(tfList, aws.StringValue(v)) + tfList = append(tfList, aws.ToString(v)) } } return tfList } -func flattenLaunchTemplateSpecification(apiObject *autoscaling.LaunchTemplateSpecification) map[string]interface{} { +func flattenLaunchTemplateSpecification(apiObject *awstypes.LaunchTemplateSpecification) map[string]interface{} { if apiObject == nil { return nil } @@ -3494,21 +3410,21 @@ func flattenLaunchTemplateSpecification(apiObject *autoscaling.LaunchTemplateSpe tfMap := map[string]interface{}{} if v := apiObject.LaunchTemplateId; v != nil { - tfMap["id"] = aws.StringValue(v) + tfMap["id"] = aws.ToString(v) } if v := apiObject.LaunchTemplateName; v != nil { - tfMap["name"] = aws.StringValue(v) + tfMap["name"] = aws.ToString(v) } if v := apiObject.Version; v != nil { - tfMap["version"] = aws.StringValue(v) + tfMap["version"] = aws.ToString(v) } return tfMap } -func flattenMixedInstancesPolicy(apiObject *autoscaling.MixedInstancesPolicy) map[string]interface{} { +func flattenMixedInstancesPolicy(apiObject *awstypes.MixedInstancesPolicy) map[string]interface{} { if apiObject == nil { return nil } @@ -3526,7 +3442,7 @@ func flattenMixedInstancesPolicy(apiObject *autoscaling.MixedInstancesPolicy) ma return tfMap } -func flattenInstancesDistribution(apiObject *autoscaling.InstancesDistribution) map[string]interface{} { +func flattenInstancesDistribution(apiObject *awstypes.InstancesDistribution) map[string]interface{} { if apiObject == nil { return nil } @@ -3534,50 +3450,50 @@ func flattenInstancesDistribution(apiObject *autoscaling.InstancesDistribution) tfMap := map[string]interface{}{} if v := apiObject.OnDemandAllocationStrategy; v != nil { - tfMap["on_demand_allocation_strategy"] = aws.StringValue(v) + tfMap["on_demand_allocation_strategy"] = aws.ToString(v) } if v := apiObject.OnDemandBaseCapacity; v != nil { - tfMap["on_demand_base_capacity"] = aws.Int64Value(v) + tfMap["on_demand_base_capacity"] = aws.ToInt32(v) } if v := apiObject.OnDemandPercentageAboveBaseCapacity; v != nil { - tfMap["on_demand_percentage_above_base_capacity"] = aws.Int64Value(v) + tfMap["on_demand_percentage_above_base_capacity"] = aws.ToInt32(v) } if v := apiObject.SpotAllocationStrategy; v != nil { - tfMap["spot_allocation_strategy"] = aws.StringValue(v) + tfMap["spot_allocation_strategy"] = aws.ToString(v) } if v := apiObject.SpotInstancePools; v != nil { - tfMap["spot_instance_pools"] = aws.Int64Value(v) + tfMap["spot_instance_pools"] = aws.ToInt32(v) } if v := apiObject.SpotMaxPrice; v != nil { - tfMap["spot_max_price"] = aws.StringValue(v) + tfMap["spot_max_price"] = aws.ToString(v) } return tfMap } -func expandInstanceMaintenancePolicy(l []interface{}) *autoscaling.InstanceMaintenancePolicy { +func expandInstanceMaintenancePolicy(l []interface{}) 
*awstypes.InstanceMaintenancePolicy { if len(l) == 0 { //Empty InstanceMaintenancePolicy block will reset already assigned values - return &autoscaling.InstanceMaintenancePolicy{ - MinHealthyPercentage: aws.Int64(-1), - MaxHealthyPercentage: aws.Int64(-1), + return &awstypes.InstanceMaintenancePolicy{ + MinHealthyPercentage: aws.Int32(-1), + MaxHealthyPercentage: aws.Int32(-1), } } tfMap := l[0].(map[string]interface{}) - return &autoscaling.InstanceMaintenancePolicy{ - MinHealthyPercentage: aws.Int64(int64(tfMap["min_healthy_percentage"].(int))), - MaxHealthyPercentage: aws.Int64(int64(tfMap["max_healthy_percentage"].(int))), + return &awstypes.InstanceMaintenancePolicy{ + MinHealthyPercentage: aws.Int32(int32(tfMap["min_healthy_percentage"].(int))), + MaxHealthyPercentage: aws.Int32(int32(tfMap["max_healthy_percentage"].(int))), } } -func flattenInstanceMaintenancePolicy(instanceMaintenancePolicy *autoscaling.InstanceMaintenancePolicy) []interface{} { +func flattenInstanceMaintenancePolicy(instanceMaintenancePolicy *awstypes.InstanceMaintenancePolicy) []interface{} { if instanceMaintenancePolicy == nil { return []interface{}{} } @@ -3590,7 +3506,7 @@ func flattenInstanceMaintenancePolicy(instanceMaintenancePolicy *autoscaling.Ins return []interface{}{m} } -func flattenLaunchTemplate(apiObject *autoscaling.LaunchTemplate) map[string]interface{} { +func flattenLaunchTemplate(apiObject *awstypes.LaunchTemplate) map[string]interface{} { if apiObject == nil { return nil } @@ -3608,7 +3524,7 @@ func flattenLaunchTemplate(apiObject *autoscaling.LaunchTemplate) map[string]int return tfMap } -func flattenLaunchTemplateSpecificationForMixedInstancesPolicy(apiObject *autoscaling.LaunchTemplateSpecification) map[string]interface{} { +func flattenLaunchTemplateSpecificationForMixedInstancesPolicy(apiObject *awstypes.LaunchTemplateSpecification) map[string]interface{} { if apiObject == nil { return nil } @@ -3616,25 +3532,21 @@ func flattenLaunchTemplateSpecificationForMixedInstancesPolicy(apiObject *autosc tfMap := map[string]interface{}{} if v := apiObject.LaunchTemplateId; v != nil { - tfMap["launch_template_id"] = aws.StringValue(v) + tfMap["launch_template_id"] = aws.ToString(v) } if v := apiObject.LaunchTemplateName; v != nil { - tfMap["launch_template_name"] = aws.StringValue(v) + tfMap["launch_template_name"] = aws.ToString(v) } if v := apiObject.Version; v != nil { - tfMap["version"] = aws.StringValue(v) + tfMap["version"] = aws.ToString(v) } return tfMap } -func flattenLaunchTemplateOverrides(apiObject *autoscaling.LaunchTemplateOverrides) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenLaunchTemplateOverrides(apiObject awstypes.LaunchTemplateOverrides) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.InstanceRequirements; v != nil { @@ -3642,7 +3554,7 @@ func flattenLaunchTemplateOverrides(apiObject *autoscaling.LaunchTemplateOverrid } if v := apiObject.InstanceType; v != nil { - tfMap["instance_type"] = aws.StringValue(v) + tfMap["instance_type"] = aws.ToString(v) } if v := apiObject.LaunchTemplateSpecification; v != nil { @@ -3650,13 +3562,13 @@ func flattenLaunchTemplateOverrides(apiObject *autoscaling.LaunchTemplateOverrid } if v := apiObject.WeightedCapacity; v != nil { - tfMap["weighted_capacity"] = aws.StringValue(v) + tfMap["weighted_capacity"] = aws.ToString(v) } return tfMap } -func flattenLaunchTemplateOverrideses(apiObjects []*autoscaling.LaunchTemplateOverrides) []interface{} { +func 
flattenLaunchTemplateOverrideses(apiObjects []awstypes.LaunchTemplateOverrides) []interface{} { if len(apiObjects) == 0 { return nil } @@ -3664,17 +3576,13 @@ func flattenLaunchTemplateOverrideses(apiObjects []*autoscaling.LaunchTemplateOv var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenLaunchTemplateOverrides(apiObject)) } return tfList } -func flattenInstanceRequirements(apiObject *autoscaling.InstanceRequirements) map[string]interface{} { +func flattenInstanceRequirements(apiObject *awstypes.InstanceRequirements) map[string]interface{} { if apiObject == nil { return nil } @@ -3685,56 +3593,50 @@ func flattenInstanceRequirements(apiObject *autoscaling.InstanceRequirements) ma tfMap["accelerator_count"] = []interface{}{flattenAcceleratorCount(v)} } - if v := apiObject.AcceleratorManufacturers; v != nil { - tfMap["accelerator_manufacturers"] = aws.StringValueSlice(v) + if apiObject.AcceleratorManufacturers != nil { + tfMap["accelerator_manufacturers"] = apiObject.AcceleratorManufacturers } - if v := apiObject.AcceleratorNames; v != nil { - tfMap["accelerator_names"] = aws.StringValueSlice(v) + if apiObject.AcceleratorNames != nil { + tfMap["accelerator_names"] = apiObject.AcceleratorNames } if v := apiObject.AcceleratorTotalMemoryMiB; v != nil { tfMap["accelerator_total_memory_mib"] = []interface{}{flattenAcceleratorTotalMemoryMiB(v)} } - if v := apiObject.AcceleratorTypes; v != nil { - tfMap["accelerator_types"] = aws.StringValueSlice(v) + if apiObject.AcceleratorTypes != nil { + tfMap["accelerator_types"] = apiObject.AcceleratorTypes } - if v := apiObject.AllowedInstanceTypes; v != nil { - tfMap["allowed_instance_types"] = aws.StringValueSlice(v) + if apiObject.AllowedInstanceTypes != nil { + tfMap["allowed_instance_types"] = apiObject.AllowedInstanceTypes } - if v := apiObject.BareMetal; v != nil { - tfMap["bare_metal"] = aws.StringValue(v) - } + tfMap["bare_metal"] = apiObject.BareMetal if v := apiObject.BaselineEbsBandwidthMbps; v != nil { tfMap["baseline_ebs_bandwidth_mbps"] = []interface{}{flattenBaselineEBSBandwidthMbps(v)} } - if v := apiObject.BurstablePerformance; v != nil { - tfMap["burstable_performance"] = aws.StringValue(v) - } + tfMap["burstable_performance"] = apiObject.BurstablePerformance if v := apiObject.CpuManufacturers; v != nil { - tfMap["cpu_manufacturers"] = aws.StringValueSlice(v) + tfMap["cpu_manufacturers"] = apiObject.CpuManufacturers } if v := apiObject.ExcludedInstanceTypes; v != nil { - tfMap["excluded_instance_types"] = aws.StringValueSlice(v) + tfMap["excluded_instance_types"] = apiObject.ExcludedInstanceTypes } if v := apiObject.InstanceGenerations; v != nil { - tfMap["instance_generations"] = aws.StringValueSlice(v) + tfMap["instance_generations"] = apiObject.InstanceGenerations } - if v := apiObject.LocalStorage; v != nil { - tfMap["local_storage"] = aws.StringValue(v) - } + tfMap["local_storage"] = apiObject.LocalStorage if v := apiObject.LocalStorageTypes; v != nil { - tfMap["local_storage_types"] = aws.StringValueSlice(v) + tfMap["local_storage_types"] = apiObject.LocalStorageTypes } if v := apiObject.MemoryGiBPerVCpu; v != nil { @@ -3754,15 +3656,15 @@ func flattenInstanceRequirements(apiObject *autoscaling.InstanceRequirements) ma } if v := apiObject.OnDemandMaxPricePercentageOverLowestPrice; v != nil { - tfMap["on_demand_max_price_percentage_over_lowest_price"] = aws.Int64Value(v) + tfMap["on_demand_max_price_percentage_over_lowest_price"] = aws.ToInt32(v) } 
if v := apiObject.RequireHibernateSupport; v != nil { - tfMap["require_hibernate_support"] = aws.BoolValue(v) + tfMap["require_hibernate_support"] = aws.ToBool(v) } if v := apiObject.SpotMaxPricePercentageOverLowestPrice; v != nil { - tfMap["spot_max_price_percentage_over_lowest_price"] = aws.Int64Value(v) + tfMap["spot_max_price_percentage_over_lowest_price"] = aws.ToInt32(v) } if v := apiObject.TotalLocalStorageGB; v != nil { @@ -3776,7 +3678,7 @@ func flattenInstanceRequirements(apiObject *autoscaling.InstanceRequirements) ma return tfMap } -func flattenAcceleratorCount(apiObject *autoscaling.AcceleratorCountRequest) map[string]interface{} { +func flattenAcceleratorCount(apiObject *awstypes.AcceleratorCountRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3784,17 +3686,17 @@ func flattenAcceleratorCount(apiObject *autoscaling.AcceleratorCountRequest) map tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Int64Value(v) + tfMap["max"] = aws.ToInt32(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Int64Value(v) + tfMap["min"] = aws.ToInt32(v) } return tfMap } -func flattenAcceleratorTotalMemoryMiB(apiObject *autoscaling.AcceleratorTotalMemoryMiBRequest) map[string]interface{} { +func flattenAcceleratorTotalMemoryMiB(apiObject *awstypes.AcceleratorTotalMemoryMiBRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3802,17 +3704,17 @@ func flattenAcceleratorTotalMemoryMiB(apiObject *autoscaling.AcceleratorTotalMem tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Int64Value(v) + tfMap["max"] = aws.ToInt32(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Int64Value(v) + tfMap["min"] = aws.ToInt32(v) } return tfMap } -func flattenBaselineEBSBandwidthMbps(apiObject *autoscaling.BaselineEbsBandwidthMbpsRequest) map[string]interface{} { +func flattenBaselineEBSBandwidthMbps(apiObject *awstypes.BaselineEbsBandwidthMbpsRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3820,17 +3722,17 @@ func flattenBaselineEBSBandwidthMbps(apiObject *autoscaling.BaselineEbsBandwidth tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Int64Value(v) + tfMap["max"] = aws.ToInt32(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Int64Value(v) + tfMap["min"] = aws.ToInt32(v) } return tfMap } -func flattenMemoryGiBPerVCPU(apiObject *autoscaling.MemoryGiBPerVCpuRequest) map[string]interface{} { +func flattenMemoryGiBPerVCPU(apiObject *awstypes.MemoryGiBPerVCpuRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3838,17 +3740,17 @@ func flattenMemoryGiBPerVCPU(apiObject *autoscaling.MemoryGiBPerVCpuRequest) map tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Float64Value(v) + tfMap["max"] = aws.ToFloat64(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Float64Value(v) + tfMap["min"] = aws.ToFloat64(v) } return tfMap } -func flattenMemoryMiB(apiObject *autoscaling.MemoryMiBRequest) map[string]interface{} { +func flattenMemoryMiB(apiObject *awstypes.MemoryMiBRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3856,17 +3758,17 @@ func flattenMemoryMiB(apiObject *autoscaling.MemoryMiBRequest) map[string]interf tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Int64Value(v) + tfMap["max"] = aws.ToInt32(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Int64Value(v) + tfMap["min"] 
= aws.ToInt32(v) } return tfMap } -func flattenNetworkBandwidthGbps(apiObject *autoscaling.NetworkBandwidthGbpsRequest) map[string]interface{} { +func flattenNetworkBandwidthGbps(apiObject *awstypes.NetworkBandwidthGbpsRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3874,17 +3776,17 @@ func flattenNetworkBandwidthGbps(apiObject *autoscaling.NetworkBandwidthGbpsRequ tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Float64Value(v) + tfMap["max"] = aws.ToFloat64(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Float64Value(v) + tfMap["min"] = aws.ToFloat64(v) } return tfMap } -func flattenNetworkInterfaceCount(apiObject *autoscaling.NetworkInterfaceCountRequest) map[string]interface{} { +func flattenNetworkInterfaceCount(apiObject *awstypes.NetworkInterfaceCountRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3892,17 +3794,17 @@ func flattenNetworkInterfaceCount(apiObject *autoscaling.NetworkInterfaceCountRe tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Int64Value(v) + tfMap["max"] = aws.ToInt32(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Int64Value(v) + tfMap["min"] = aws.ToInt32(v) } return tfMap } -func flattentTotalLocalStorageGB(apiObject *autoscaling.TotalLocalStorageGBRequest) map[string]interface{} { +func flattentTotalLocalStorageGB(apiObject *awstypes.TotalLocalStorageGBRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3910,35 +3812,31 @@ func flattentTotalLocalStorageGB(apiObject *autoscaling.TotalLocalStorageGBReque tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Float64Value(v) + tfMap["max"] = aws.ToFloat64(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Float64Value(v) + tfMap["min"] = aws.ToFloat64(v) } return tfMap } -func flattenTrafficSourceIdentifier(apiObject *autoscaling.TrafficSourceIdentifier) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenTrafficSourceIdentifier(apiObject awstypes.TrafficSourceIdentifier) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Identifier; v != nil { - tfMap["identifier"] = aws.StringValue(v) + tfMap["identifier"] = aws.ToString(v) } if v := apiObject.Type; v != nil { - tfMap["type"] = aws.StringValue(v) + tfMap["type"] = aws.ToString(v) } return tfMap } -func flattenTrafficSourceIdentifiers(apiObjects []*autoscaling.TrafficSourceIdentifier) []interface{} { +func flattenTrafficSourceIdentifiers(apiObjects []awstypes.TrafficSourceIdentifier) []interface{} { if len(apiObjects) == 0 { return nil } @@ -3946,17 +3844,13 @@ func flattenTrafficSourceIdentifiers(apiObjects []*autoscaling.TrafficSourceIden var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenTrafficSourceIdentifier(apiObject)) } return tfList } -func flattenVCPUCount(apiObject *autoscaling.VCpuCountRequest) map[string]interface{} { +func flattenVCPUCount(apiObject *awstypes.VCpuCountRequest) map[string]interface{} { if apiObject == nil { return nil } @@ -3964,17 +3858,17 @@ func flattenVCPUCount(apiObject *autoscaling.VCpuCountRequest) map[string]interf tfMap := map[string]interface{}{} if v := apiObject.Max; v != nil { - tfMap["max"] = aws.Int64Value(v) + tfMap["max"] = aws.ToInt32(v) } if v := apiObject.Min; v != nil { - tfMap["min"] = aws.Int64Value(v) + tfMap["min"] = aws.ToInt32(v) } return tfMap } -func 
flattenSuspendedProcesses(apiObjects []*autoscaling.SuspendedProcess) []string { +func flattenSuspendedProcesses(apiObjects []awstypes.SuspendedProcess) []string { if len(apiObjects) == 0 { return nil } @@ -3983,14 +3877,14 @@ func flattenSuspendedProcesses(apiObjects []*autoscaling.SuspendedProcess) []str for _, apiObject := range apiObjects { if v := apiObject.ProcessName; v != nil { - tfList = append(tfList, aws.StringValue(v)) + tfList = append(tfList, aws.ToString(v)) } } return tfList } -func flattenWarmPoolConfiguration(apiObject *autoscaling.WarmPoolConfiguration) map[string]interface{} { +func flattenWarmPoolConfiguration(apiObject *awstypes.WarmPoolConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -4002,23 +3896,21 @@ func flattenWarmPoolConfiguration(apiObject *autoscaling.WarmPoolConfiguration) } if v := apiObject.MaxGroupPreparedCapacity; v != nil { - tfMap["max_group_prepared_capacity"] = aws.Int64Value(v) + tfMap["max_group_prepared_capacity"] = aws.ToInt32(v) } else { - tfMap["max_group_prepared_capacity"] = int64(DefaultWarmPoolMaxGroupPreparedCapacity) + tfMap["max_group_prepared_capacity"] = int64(defaultWarmPoolMaxGroupPreparedCapacity) } if v := apiObject.MinSize; v != nil { - tfMap["min_size"] = aws.Int64Value(v) + tfMap["min_size"] = aws.ToInt32(v) } - if v := apiObject.PoolState; v != nil { - tfMap["pool_state"] = aws.StringValue(v) - } + tfMap["pool_state"] = apiObject.PoolState return tfMap } -func flattenWarmPoolInstanceReusePolicy(apiObject *autoscaling.InstanceReusePolicy) map[string]interface{} { +func flattenWarmPoolInstanceReusePolicy(apiObject *awstypes.InstanceReusePolicy) map[string]interface{} { if apiObject == nil { return nil } @@ -4026,20 +3918,20 @@ func flattenWarmPoolInstanceReusePolicy(apiObject *autoscaling.InstanceReusePoli tfMap := map[string]interface{}{} if v := apiObject.ReuseOnScaleIn; v != nil { - tfMap["reuse_on_scale_in"] = aws.BoolValue(v) + tfMap["reuse_on_scale_in"] = aws.ToBool(v) } return tfMap } -func cancelInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, name string) error { +func cancelInstanceRefresh(ctx context.Context, conn *autoscaling.Client, name string) error { input := &autoscaling.CancelInstanceRefreshInput{ AutoScalingGroupName: aws.String(name), } - output, err := conn.CancelInstanceRefreshWithContext(ctx, input) + output, err := conn.CancelInstanceRefresh(ctx, input) - if tfawserr.ErrCodeEquals(err, autoscaling.ErrCodeActiveInstanceRefreshNotFoundFault) { + if errs.IsA[*awstypes.ActiveInstanceRefreshNotFoundFault](err) { return nil } @@ -4047,7 +3939,7 @@ func cancelInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, n return fmt.Errorf("cancelling Auto Scaling Group (%s) instance refresh: %w", name, err) } - _, err = waitInstanceRefreshCancelled(ctx, conn, name, aws.StringValue(output.InstanceRefreshId), instanceRefreshCancelledTimeout) + _, err = waitInstanceRefreshCancelled(ctx, conn, name, aws.ToString(output.InstanceRefreshId), instanceRefreshCancelledTimeout) if err != nil { return fmt.Errorf("waiting for Auto Scaling Group (%s) instance refresh cancel: %w", name, err) @@ -4056,15 +3948,15 @@ func cancelInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, n return nil } -func startInstanceRefresh(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.StartInstanceRefreshInput) error { - name := aws.StringValue(input.AutoScalingGroupName) +func startInstanceRefresh(ctx context.Context, conn *autoscaling.Client, input 
*autoscaling.StartInstanceRefreshInput) error { + name := aws.ToString(input.AutoScalingGroupName) _, err := tfresource.RetryWhen(ctx, instanceRefreshStartedTimeout, func() (interface{}, error) { - return conn.StartInstanceRefreshWithContext(ctx, input) + return conn.StartInstanceRefresh(ctx, input) }, func(err error) (bool, error) { - if tfawserr.ErrCodeEquals(err, autoscaling.ErrCodeInstanceRefreshInProgressFault) { + if errs.IsA[*awstypes.InstanceRefreshInProgressFault](err) { if err := cancelInstanceRefresh(ctx, conn, name); err != nil { return false, err } @@ -4099,7 +3991,7 @@ func validateGroupInstanceRefreshTriggerFields(i interface{}, path cty.Path) dia } } - schema := ResourceGroup().SchemaMap() + schema := resourceGroup().SchemaMap() for attr, attrSchema := range schema { if v == attr { if attrSchema.Computed && !attrSchema.Optional { diff --git a/internal/service/autoscaling/group_data_source.go b/internal/service/autoscaling/group_data_source.go index 78dde67dd21..3aace283280 100644 --- a/internal/service/autoscaling/group_data_source.go +++ b/internal/service/autoscaling/group_data_source.go @@ -4,20 +4,17 @@ package autoscaling import ( - "bytes" "context" - "fmt" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" ) -// @SDKDataSource("aws_autoscaling_group") -func DataSourceGroup() *schema.Resource { +// @SDKDataSource("aws_autoscaling_group", name="Group") +func dataSourceGroup() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceGroupRead, @@ -487,16 +484,6 @@ func DataSourceGroup() *schema.Resource { }, }, }, - // This should be removable, but wait until other tags work is being done. 
- Set: func(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["key"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) - buf.WriteString(fmt.Sprintf("%t-", m["propagate_at_launch"].(bool))) - - return create.StringHashcode(buf.String()) - }, }, "target_group_arns": { Type: schema.TypeSet, @@ -574,19 +561,19 @@ func DataSourceGroup() *schema.Resource { func dataSourceGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig groupName := d.Get("name").(string) - group, err := FindGroupByName(ctx, conn, groupName) + group, err := findGroupByName(ctx, conn, groupName) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Auto Scaling Group (%s): %s", groupName, err) } - d.SetId(aws.StringValue(group.AutoScalingGroupName)) + d.SetId(aws.ToString(group.AutoScalingGroupName)) d.Set("arn", group.AutoScalingGroupARN) - d.Set("availability_zones", aws.StringValueSlice(group.AvailabilityZones)) + d.Set("availability_zones", group.AvailabilityZones) d.Set("default_cooldown", group.DefaultCooldown) d.Set("desired_capacity", group.DesiredCapacity) d.Set("desired_capacity_type", group.DesiredCapacityType) @@ -604,7 +591,7 @@ func dataSourceGroupRead(ctx context.Context, d *schema.ResourceData, meta inter } else { d.Set("launch_template", nil) } - d.Set("load_balancers", aws.StringValueSlice(group.LoadBalancerNames)) + d.Set("load_balancers", group.LoadBalancerNames) d.Set("max_instance_lifetime", group.MaxInstanceLifetime) d.Set("max_size", group.MaxSize) d.Set("min_size", group.MinSize) @@ -622,11 +609,11 @@ func dataSourceGroupRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("service_linked_role_arn", group.ServiceLinkedRoleARN) d.Set("status", group.Status) d.Set("suspended_processes", flattenSuspendedProcesses(group.SuspendedProcesses)) - if err := d.Set("tag", ListOfMap(KeyValueTags(ctx, group.Tags, d.Id(), TagResourceTypeGroup).IgnoreAWS().IgnoreConfig(ignoreTagsConfig))); err != nil { + if err := d.Set("tag", listOfMap(KeyValueTags(ctx, group.Tags, d.Id(), TagResourceTypeGroup).IgnoreAWS().IgnoreConfig(ignoreTagsConfig))); err != nil { return sdkdiag.AppendErrorf(diags, "setting tag: %s", err) } - d.Set("target_group_arns", aws.StringValueSlice(group.TargetGroupARNs)) - d.Set("termination_policies", aws.StringValueSlice(group.TerminationPolicies)) + d.Set("target_group_arns", group.TargetGroupARNs) + d.Set("termination_policies", group.TerminationPolicies) if err := d.Set("traffic_source", flattenTrafficSourceIdentifiers(group.TrafficSources)); err != nil { return sdkdiag.AppendErrorf(diags, "setting traffic_source: %s", err) } diff --git a/internal/service/autoscaling/group_migrate.go b/internal/service/autoscaling/group_migrate.go index 8bb7d144b1e..f691d217f41 100644 --- a/internal/service/autoscaling/group_migrate.go +++ b/internal/service/autoscaling/group_migrate.go @@ -6,7 +6,7 @@ package autoscaling import ( "context" - "github.com/aws/aws-sdk-go/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" ) @@ -216,7 +216,7 @@ func resourceGroupV0() *schema.Resource { "metrics_granularity": { 
Type: schema.TypeString, Optional: true, - Default: DefaultEnabledMetricsGranularity, + Default: defaultEnabledMetricsGranularity, }, "min_elb_capacity": { Type: schema.TypeInt, @@ -718,7 +718,7 @@ func resourceGroupV0() *schema.Resource { "max_group_prepared_capacity": { Type: schema.TypeInt, Optional: true, - Default: DefaultWarmPoolMaxGroupPreparedCapacity, + Default: defaultWarmPoolMaxGroupPreparedCapacity, }, "min_size": { Type: schema.TypeInt, @@ -728,7 +728,7 @@ func resourceGroupV0() *schema.Resource { "pool_state": { Type: schema.TypeString, Optional: true, - Default: autoscaling.WarmPoolStateStopped, + Default: awstypes.WarmPoolStateStopped, }, }, }, diff --git a/internal/service/autoscaling/group_tag.go b/internal/service/autoscaling/group_tag.go index e2967504f6c..96280e3e71e 100644 --- a/internal/service/autoscaling/group_tag.go +++ b/internal/service/autoscaling/group_tag.go @@ -15,13 +15,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_autoscaling_group_tag") -func ResourceGroupTag() *schema.Resource { +// @SDKResource("aws_autoscaling_group_tag", name="Group Tag") +func resourceGroupTag() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceGroupTagCreate, ReadWithoutTimeout: resourceGroupTagRead, UpdateWithoutTimeout: resourceGroupTagUpdate, DeleteWithoutTimeout: resourceGroupTagDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -60,7 +61,7 @@ func ResourceGroupTag() *schema.Resource { func resourceGroupTagCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { // nosemgrep:ci.semgrep.tags.calling-UpdateTags-in-resource-create var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) identifier := d.Get("autoscaling_group_name").(string) tags := d.Get("tag").([]interface{}) @@ -77,14 +78,14 @@ func resourceGroupTagCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceGroupTagRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) identifier, key, err := tftags.GetResourceID(d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading AutoScaling Group (%s) tag (%s): %s", identifier, key, err) } - value, err := GetTag(ctx, conn, identifier, TagResourceTypeGroup, key) + value, err := findTag(ctx, conn, identifier, TagResourceTypeGroup, key) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AutoScaling Group (%s) tag (%s), removing from state", identifier, key) @@ -111,7 +112,7 @@ func resourceGroupTagRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceGroupTagUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) identifier, key, err := tftags.GetResourceID(d.Id()) if err != nil { @@ -127,7 +128,7 @@ func resourceGroupTagUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceGroupTagDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) 
identifier, key, err := tftags.GetResourceID(d.Id()) if err != nil { diff --git a/internal/service/autoscaling/group_tag_test.go b/internal/service/autoscaling/group_tag_test.go index 6e54fcb813b..7b310e396ee 100644 --- a/internal/service/autoscaling/group_tag_test.go +++ b/internal/service/autoscaling/group_tag_test.go @@ -104,7 +104,7 @@ func TestAccAutoScalingGroupTag_value(t *testing.T) { func testAccCheckGroupTagDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_group_tag" { @@ -117,7 +117,7 @@ func testAccCheckGroupTagDestroy(ctx context.Context) resource.TestCheckFunc { return err } - _, err = tfautoscaling.GetTag(ctx, conn, identifier, tfautoscaling.TagResourceTypeGroup, key) + _, err = tfautoscaling.FindTag(ctx, conn, identifier, tfautoscaling.TagResourceTypeGroup, key) if tfresource.NotFound(err) { continue @@ -141,19 +141,15 @@ func testAccCheckGroupTagExists(ctx context.Context, n string) resource.TestChec return fmt.Errorf("not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("%s: missing resource ID", n) - } - identifier, key, err := tftags.GetResourceID(rs.Primary.ID) if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) - _, err = tfautoscaling.GetTag(ctx, conn, identifier, tfautoscaling.TagResourceTypeGroup, key) + _, err = tfautoscaling.FindTag(ctx, conn, identifier, tfautoscaling.TagResourceTypeGroup, key) return err } diff --git a/internal/service/autoscaling/group_test.go b/internal/service/autoscaling/group_test.go index dbd64f117ce..a29b8701b3a 100644 --- a/internal/service/autoscaling/group_test.go +++ b/internal/service/autoscaling/group_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go/service/elbv2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -44,8 +45,11 @@ func testAccGroupImportStep(n string) resource.TestStep { "force_delete", "ignore_failed_scaling_activities", "initial_lifecycle_hook", + "load_balancers", "tag", "tags", + "target_group_arns", + "traffic_source", "wait_for_capacity_timeout", "wait_for_elb_capacity", }, @@ -54,7 +58,7 @@ func testAccGroupImportStep(n string) resource.TestStep { func TestAccAutoScalingGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -119,7 +123,7 @@ func TestAccAutoScalingGroup_basic(t *testing.T) { func TestAccAutoScalingGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -143,7 +147,7 @@ func TestAccAutoScalingGroup_disappears(t *testing.T) { func 
TestAccAutoScalingGroup_defaultInstanceWarmup(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -174,7 +178,7 @@ func TestAccAutoScalingGroup_defaultInstanceWarmup(t *testing.T) { func TestAccAutoScalingGroup_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -199,7 +203,7 @@ func TestAccAutoScalingGroup_nameGenerated(t *testing.T) { func TestAccAutoScalingGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -224,7 +228,7 @@ func TestAccAutoScalingGroup_namePrefix(t *testing.T) { func TestAccAutoScalingGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -282,7 +286,7 @@ func TestAccAutoScalingGroup_tags(t *testing.T) { func TestAccAutoScalingGroup_simple(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -398,7 +402,7 @@ func TestAccAutoScalingGroup_simple(t *testing.T) { func TestAccAutoScalingGroup_terminationPolicies(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -443,7 +447,7 @@ func TestAccAutoScalingGroup_terminationPolicies(t *testing.T) { func TestAccAutoScalingGroup_vpcUpdates(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -478,7 +482,7 @@ func TestAccAutoScalingGroup_vpcUpdates(t *testing.T) { func TestAccAutoScalingGroup_withInstanceMaintenancePolicyAfterCreation(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -536,7 +540,7 @@ func TestAccAutoScalingGroup_withInstanceMaintenancePolicyAfterCreation(t *testi func TestAccAutoScalingGroup_withInstanceMaintenancePolicyAtCreation(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -589,7 +593,7 @@ func TestAccAutoScalingGroup_withInstanceMaintenancePolicyAtCreation(t *testing. 
func TestAccAutoScalingGroup_withInstanceMaintenancePolicyNegativeValues(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -622,7 +626,7 @@ func TestAccAutoScalingGroup_withInstanceMaintenancePolicyNegativeValues(t *test func TestAccAutoScalingGroup_withLoadBalancer(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -654,7 +658,7 @@ func TestAccAutoScalingGroup_withLoadBalancer(t *testing.T) { func TestAccAutoScalingGroup_WithLoadBalancer_toTargetGroup(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -698,7 +702,7 @@ func TestAccAutoScalingGroup_WithLoadBalancer_toTargetGroup(t *testing.T) { func TestAccAutoScalingGroup_withTrafficSourceELB(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -730,7 +734,7 @@ func TestAccAutoScalingGroup_withTrafficSourceELB(t *testing.T) { func TestAccAutoScalingGroup_withTrafficSourcesELBs(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -765,7 +769,7 @@ func TestAccAutoScalingGroup_withTrafficSourcesELBs(t *testing.T) { func TestAccAutoScalingGroup_withTrafficSourceELB_toTargetGroup(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -803,7 +807,7 @@ func TestAccAutoScalingGroup_withTrafficSourceELB_toTargetGroup(t *testing.T) { func TestAccAutoScalingGroup_withTrafficSourceELBV2(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -841,7 +845,7 @@ func TestAccAutoScalingGroup_withTrafficSourceELBV2(t *testing.T) { func TestAccAutoScalingGroup_withTrafficSourceVPCLatticeTargetGroup(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -870,7 +874,7 @@ func TestAccAutoScalingGroup_withTrafficSourceVPCLatticeTargetGroup(t *testing.T func TestAccAutoScalingGroup_withTrafficSourceVPCLatticeTargetGroups(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -894,7 +898,7 @@ func TestAccAutoScalingGroup_withTrafficSourceVPCLatticeTargetGroups(t *testing. 
func TestAccAutoScalingGroup_withPlacementGroup(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -972,7 +976,7 @@ func TestAccAutoScalingGroup_withScalingActivityErrorIncorrectInstanceArchitectu func TestAccAutoScalingGroup_withScalingActivityErrorIncorrectInstanceArchitecture_Recovers(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1004,7 +1008,7 @@ func TestAccAutoScalingGroup_withScalingActivityErrorIncorrectInstanceArchitectu func TestAccAutoScalingGroup_enablingMetrics(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1040,7 +1044,7 @@ func TestAccAutoScalingGroup_enablingMetrics(t *testing.T) { func TestAccAutoScalingGroup_withMetrics(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1088,7 +1092,7 @@ func TestAccAutoScalingGroup_withMetrics(t *testing.T) { func TestAccAutoScalingGroup_suspendingProcesses(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1130,7 +1134,7 @@ func TestAccAutoScalingGroup_suspendingProcesses(t *testing.T) { func TestAccAutoScalingGroup_serviceLinkedRoleARN(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1154,7 +1158,7 @@ func TestAccAutoScalingGroup_serviceLinkedRoleARN(t *testing.T) { func TestAccAutoScalingGroup_maxInstanceLifetime(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1185,7 +1189,7 @@ func TestAccAutoScalingGroup_maxInstanceLifetime(t *testing.T) { func TestAccAutoScalingGroup_initialLifecycleHook(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1235,7 +1239,7 @@ func TestAccAutoScalingGroup_initialLifecycleHook(t *testing.T) { func TestAccAutoScalingGroup_launchTemplate(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1262,7 +1266,7 @@ func TestAccAutoScalingGroup_launchTemplate(t *testing.T) { func TestAccAutoScalingGroup_LaunchTemplate_update(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateNameUpdated := fmt.Sprintf("%s_updated", rName) resourceName := "aws_autoscaling_group.test" @@ -1343,7 
+1347,7 @@ func TestAccAutoScalingGroup_LaunchTemplate_update(t *testing.T) { func TestAccAutoScalingGroup_largeDesiredCapacity(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1369,7 +1373,7 @@ func TestAccAutoScalingGroup_largeDesiredCapacity(t *testing.T) { func TestAccAutoScalingGroup_InstanceRefresh_basic(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1521,7 +1525,7 @@ func TestAccAutoScalingGroup_InstanceRefresh_basic(t *testing.T) { func TestAccAutoScalingGroup_InstanceRefresh_start(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" launchConfigurationResourceName := "aws_launch_configuration.test" @@ -1546,7 +1550,7 @@ func TestAccAutoScalingGroup_InstanceRefresh_start(t *testing.T) { testAccCheckGroupExists(ctx, resourceName, &group), resource.TestCheckResourceAttrPair(resourceName, "launch_configuration", launchConfigurationResourceName, "name"), testAccCheckInstanceRefreshCount(ctx, &group, 1), - testAccCheckInstanceRefreshStatus(ctx, &group, 0, autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress), + testAccCheckInstanceRefreshStatus(ctx, &group, 0, awstypes.InstanceRefreshStatusPending, awstypes.InstanceRefreshStatusInProgress), ), }, { @@ -1555,8 +1559,8 @@ func TestAccAutoScalingGroup_InstanceRefresh_start(t *testing.T) { testAccCheckGroupExists(ctx, resourceName, &group), resource.TestCheckResourceAttrPair(resourceName, "launch_configuration", launchConfigurationResourceName, "name"), testAccCheckInstanceRefreshCount(ctx, &group, 2), - testAccCheckInstanceRefreshStatus(ctx, &group, 0, autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress), - testAccCheckInstanceRefreshStatus(ctx, &group, 1, autoscaling.InstanceRefreshStatusCancelled), + testAccCheckInstanceRefreshStatus(ctx, &group, 0, awstypes.InstanceRefreshStatusPending, awstypes.InstanceRefreshStatusInProgress), + testAccCheckInstanceRefreshStatus(ctx, &group, 1, awstypes.InstanceRefreshStatusCancelled), ), }, }, @@ -1565,7 +1569,7 @@ func TestAccAutoScalingGroup_InstanceRefresh_start(t *testing.T) { func TestAccAutoScalingGroup_InstanceRefresh_triggers(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1596,7 +1600,7 @@ func TestAccAutoScalingGroup_InstanceRefresh_triggers(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "instance_refresh.0.triggers.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "instance_refresh.0.triggers.*", "tag"), testAccCheckInstanceRefreshCount(ctx, &group, 1), - testAccCheckInstanceRefreshStatus(ctx, &group, 0, autoscaling.InstanceRefreshStatusPending, autoscaling.InstanceRefreshStatusInProgress), + testAccCheckInstanceRefreshStatus(ctx, &group, 0, awstypes.InstanceRefreshStatusPending, awstypes.InstanceRefreshStatusInProgress), ), }, }, @@ -1605,7 +1609,7 @@ func TestAccAutoScalingGroup_InstanceRefresh_triggers(t *testing.T) { func 
TestAccAutoScalingGroup_InstanceRefresh_autoRollback(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -1658,7 +1662,7 @@ func TestAccAutoScalingGroup_InstanceRefresh_autoRollback(t *testing.T) { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/256 func TestAccAutoScalingGroup_loadBalancers(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1696,7 +1700,7 @@ func TestAccAutoScalingGroup_loadBalancers(t *testing.T) { func TestAccAutoScalingGroup_targetGroups(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1734,7 +1738,7 @@ func TestAccAutoScalingGroup_targetGroups(t *testing.T) { func TestAccAutoScalingGroup_ALBTargetGroups_elbCapacity(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) subnetCount := 2 @@ -1761,7 +1765,7 @@ func TestAccAutoScalingGroup_ALBTargetGroups_elbCapacity(t *testing.T) { func TestAccAutoScalingGroup_warmPool(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1808,7 +1812,7 @@ func TestAccAutoScalingGroup_warmPool(t *testing.T) { func TestAccAutoScalingGroup_launchTempPartitionNum(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1831,7 +1835,7 @@ func TestAccAutoScalingGroup_launchTempPartitionNum(t *testing.T) { func TestAccAutoScalingGroup_Destroy_whenProtectedFromScaleIn(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1859,7 +1863,7 @@ func TestAccAutoScalingGroup_Destroy_whenProtectedFromScaleIn(t *testing.T) { func TestAccAutoScalingGroup_mixedInstancesPolicy(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1891,7 +1895,7 @@ func TestAccAutoScalingGroup_mixedInstancesPolicy(t *testing.T) { func TestAccAutoScalingGroup_MixedInstancesPolicy_capacityRebalance(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1926,7 +1930,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicy_capacityRebalance(t *testing.T func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_onDemandAllocationStrategy(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := 
"aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1952,7 +1956,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_onDemandA func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_onDemandBaseCapacity(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1997,7 +2001,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_onDemandB // Test to verify fix for behavior in GH-ISSUE 7368 func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_updateToZeroOnDemandBaseCapacity(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2033,7 +2037,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_updateToZ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_onDemandPercentageAboveBaseCapacity(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2068,7 +2072,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_onDemandP func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_spotAllocationStrategy(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2094,7 +2098,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_spotAlloc func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_spotInstancePools(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2129,7 +2133,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_spotInsta func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_spotMaxPrice(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2173,7 +2177,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyInstancesDistribution_spotMaxPr func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification_launchTemplateName(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) launchTemplateNameUpdated := fmt.Sprintf("%s_updated", rName) @@ -2210,7 +2214,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateLaunchTemplateSpe func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateLaunchTemplateSpecification_version(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2247,7 +2251,7 
@@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateLaunchTemplateSpe func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceType(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2286,7 +2290,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceTypeWithLaunchTemplateSpecification(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2316,7 +2320,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_weightedCapacity(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2346,7 +2350,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_weighted func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_weightedCapacity_withELB(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2376,7 +2380,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_weighted func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_memoryMiBAndVCPUCount(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2442,7 +2446,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_acceleratorCount(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2529,7 +2533,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_acceleratorManufacturers(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2590,7 +2594,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_acceleratorNames(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2654,7 +2658,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func 
TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_acceleratorTotalMemoryMiB(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2743,7 +2747,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_acceleratorTypes(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2803,7 +2807,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_allowedInstanceTypes(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2863,7 +2867,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_bareMetal(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -2939,7 +2943,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_baselineEBSBandwidthMbps(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3026,7 +3030,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_burstablePerformance(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3102,7 +3106,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_cpuManufacturers(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3162,7 +3166,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_excludedInstanceTypes(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3222,7 +3226,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func 
TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_instanceGenerations(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3281,7 +3285,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_localStorage(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3357,7 +3361,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_localStorageTypes(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3416,7 +3420,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_memoryGiBPerVCPU(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3503,7 +3507,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_networkBandwidthGbps(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3590,7 +3594,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_networkInterfaceCount(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3677,7 +3681,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_onDemandMaxPricePercentageOverLowestPrice(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3713,7 +3717,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_requireHibernateSupport(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3769,7 +3773,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func 
TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_spotMaxPricePercentageOverLowestPrice(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3805,7 +3809,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_totalLocalStorageGB(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3892,7 +3896,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_desiredCapacityTypeUnits(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3922,7 +3926,7 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_desiredCapacityTypeVCPU(t *testing.T) { ctx := acctest.Context(t) - var group autoscaling.Group + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_group.test" @@ -3950,18 +3954,14 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance }) } -func testAccCheckGroupExists(ctx context.Context, n string, v *autoscaling.Group) resource.TestCheckFunc { +func testAccCheckGroupExists(ctx context.Context, n string, v *awstypes.AutoScalingGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Auto Scaling Group ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) output, err := tfautoscaling.FindGroupByName(ctx, conn, rs.Primary.ID) @@ -3977,7 +3977,7 @@ func testAccCheckGroupExists(ctx context.Context, n string, v *autoscaling.Group func testAccCheckGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_group" { @@ -4001,12 +4001,12 @@ func testAccCheckGroupDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckGroupHealthyInstanceCount(v *autoscaling.Group, expected int) resource.TestCheckFunc { +func testAccCheckGroupHealthyInstanceCount(v *awstypes.AutoScalingGroup, expected int) resource.TestCheckFunc { return func(s *terraform.State) error { count := 0 for _, v := range v.Instances { - if aws.StringValue(v.HealthStatus) == tfautoscaling.InstanceHealthStatusHealthy { + if aws.ToString(v.HealthStatus) == tfautoscaling.InstanceHealthStatusHealthy { count++ } } @@ -4019,9 +4019,9 @@ func testAccCheckGroupHealthyInstanceCount(v *autoscaling.Group, expected int) 
r } } -func testAccCheckInstanceRefreshCount(ctx context.Context, v *autoscaling.Group, expected int) resource.TestCheckFunc { +func testAccCheckInstanceRefreshCount(ctx context.Context, v *awstypes.AutoScalingGroup, expected int) resource.TestCheckFunc { return func(state *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) output, err := tfautoscaling.FindInstanceRefreshes(ctx, conn, &autoscaling.DescribeInstanceRefreshesInput{ AutoScalingGroupName: v.AutoScalingGroupName, @@ -4039,9 +4039,9 @@ func testAccCheckInstanceRefreshCount(ctx context.Context, v *autoscaling.Group, } } -func testAccCheckInstanceRefreshStatus(ctx context.Context, v *autoscaling.Group, index int, expected ...string) resource.TestCheckFunc { +func testAccCheckInstanceRefreshStatus(ctx context.Context, v *awstypes.AutoScalingGroup, index int, expected ...awstypes.InstanceRefreshStatus) resource.TestCheckFunc { return func(state *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) output, err := tfautoscaling.FindInstanceRefreshes(ctx, conn, &autoscaling.DescribeInstanceRefreshesInput{ AutoScalingGroupName: v.AutoScalingGroupName, @@ -4055,7 +4055,7 @@ func testAccCheckInstanceRefreshStatus(ctx context.Context, v *autoscaling.Group return fmt.Errorf("Expected at least %d Instance Refreshes, got %d", index+1, got) } - status := aws.StringValue(output[index].Status) + status := output[index].Status for _, v := range expected { if status == v { @@ -4074,10 +4074,6 @@ func testAccCheckLBTargetGroupExists(ctx context.Context, n string, v *elbv2.Tar return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No ELBv2 Target Group ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) output, err := tfelbv2.FindTargetGroupByARN(ctx, conn, rs.Primary.ID) @@ -4107,7 +4103,7 @@ func testAccCheckALBTargetGroupHealthy(ctx context.Context, v *elbv2.TargetGroup } for _, v := range output.TargetHealthDescriptions { - if v.TargetHealth == nil || aws.StringValue(v.TargetHealth.State) != elbv2.TargetHealthStateEnumHealthy { + if v.TargetHealth == nil || aws.ToString(v.TargetHealth.State) != elbv2.TargetHealthStateEnumHealthy { return errors.New("Not all instances in target group are healthy yet, but should be") } } diff --git a/internal/service/autoscaling/groups_data_source.go b/internal/service/autoscaling/groups_data_source.go index e0d7cd1545b..2efa1c7b4c7 100644 --- a/internal/service/autoscaling/groups_data_source.go +++ b/internal/service/autoscaling/groups_data_source.go @@ -7,8 +7,9 @@ import ( "context" "sort" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -16,8 +17,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" ) -// @SDKDataSource("aws_autoscaling_groups") -func DataSourceGroups() *schema.Resource { +// @SDKDataSource("aws_autoscaling_groups", name="Groups") +func dataSourceGroups() *schema.Resource { return &schema.Resource{ 
ReadWithoutTimeout: dataSourceGroupsRead, @@ -54,13 +55,13 @@ func DataSourceGroups() *schema.Resource { } } -func buildFiltersDataSource(set *schema.Set) []*autoscaling.Filter { - var filters []*autoscaling.Filter +func buildFiltersDataSource(set *schema.Set) []awstypes.Filter { + var filters []awstypes.Filter for _, v := range set.List() { m := v.(map[string]interface{}) - var filterValues []*string + var filterValues []string for _, e := range m["values"].([]interface{}) { - filterValues = append(filterValues, aws.String(e.(string))) + filterValues = append(filterValues, e.(string)) } // In previous iterations, users were expected to provide "key" and "value" tag names. @@ -73,7 +74,7 @@ func buildFiltersDataSource(set *schema.Set) []*autoscaling.Filter { if name == "value" { name = "tag-value" } - filters = append(filters, &autoscaling.Filter{ + filters = append(filters, awstypes.Filter{ Name: aws.String(name), Values: filterValues, }) @@ -83,12 +84,12 @@ func buildFiltersDataSource(set *schema.Set) []*autoscaling.Filter { func dataSourceGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) input := &autoscaling.DescribeAutoScalingGroupsInput{} if v, ok := d.GetOk("names"); ok && len(v.([]interface{})) > 0 { - input.AutoScalingGroupNames = flex.ExpandStringList(v.([]interface{})) + input.AutoScalingGroupNames = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk("filter"); ok { @@ -104,8 +105,8 @@ func dataSourceGroupsRead(ctx context.Context, d *schema.ResourceData, meta inte var arns, names []string for _, group := range groups { - arns = append(arns, aws.StringValue(group.AutoScalingGroupARN)) - names = append(names, aws.StringValue(group.AutoScalingGroupName)) + arns = append(arns, aws.ToString(group.AutoScalingGroupARN)) + names = append(names, aws.ToString(group.AutoScalingGroupName)) } sort.Strings(arns) diff --git a/internal/service/autoscaling/launch_configuration.go b/internal/service/autoscaling/launch_configuration.go index 0b34279510f..4c8e2b6bf8f 100644 --- a/internal/service/autoscaling/launch_configuration.go +++ b/internal/service/autoscaling/launch_configuration.go @@ -10,10 +10,11 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -21,6 +22,7 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" @@ -30,7 +32,7 @@ import ( // 
nosemgrep:ci.semgrep.aws.multiple-service-imports ) // @SDKResource("aws_launch_configuration", name="Launch Configuration") -func ResourceLaunchConfiguration() *schema.Resource { +func resourceLaunchConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLaunchConfigurationCreate, ReadWithoutTimeout: resourceLaunchConfigurationRead, @@ -182,7 +184,7 @@ func ResourceLaunchConfiguration() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{autoscaling.InstanceMetadataEndpointStateEnabled, autoscaling.InstanceMetadataEndpointStateDisabled}, false), + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.InstanceMetadataEndpointStateEnabled, awstypes.InstanceMetadataEndpointStateDisabled), false), }, "http_put_response_hop_limit": { Type: schema.TypeInt, @@ -196,7 +198,7 @@ func ResourceLaunchConfiguration() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{autoscaling.InstanceMetadataHttpTokensStateOptional, autoscaling.InstanceMetadataHttpTokensStateRequired}, false), + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.InstanceMetadataHttpTokensStateOptional, awstypes.InstanceMetadataHttpTokensStateRequired), false), }, }, }, @@ -310,7 +312,7 @@ func ResourceLaunchConfiguration() *schema.Resource { func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - autoscalingconn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + autoscalingconn := meta.(*conns.AWSClient).AutoScalingClient(ctx) ec2conn := meta.(*conns.AWSClient).EC2Conn(ctx) lcName := create.Name(d.Get("name").(string), d.Get("name_prefix").(string)) @@ -330,7 +332,7 @@ func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceDa input.IamInstanceProfile = aws.String(v.(string)) } - input.InstanceMonitoring = &autoscaling.InstanceMonitoring{ + input.InstanceMonitoring = &awstypes.InstanceMonitoring{ Enabled: aws.Bool(d.Get("enable_monitoring").(bool)), } @@ -347,7 +349,7 @@ func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceDa } if v, ok := d.GetOk("security_groups"); ok && v.(*schema.Set).Len() > 0 { - input.SecurityGroups = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroups = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("spot_price"); ok { @@ -367,13 +369,13 @@ func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Launch Configuration (%s): %s", lcName, err) } - var blockDeviceMappings []*autoscaling.BlockDeviceMapping + var blockDeviceMappings []awstypes.BlockDeviceMapping if v, ok := d.GetOk("ebs_block_device"); ok && v.(*schema.Set).Len() > 0 { v := expandBlockDeviceMappings(v.(*schema.Set).List(), expandBlockDeviceMappingForEBSBlockDevice) for _, v := range v { - if aws.StringValue(v.DeviceName) == rootDeviceName { + if aws.ToString(v.DeviceName) == rootDeviceName { return sdkdiag.AppendErrorf(diags, "root device (%s) declared as an 'ebs_block_device'. 
Use 'root_block_device' argument.", rootDeviceName) } } @@ -398,12 +400,11 @@ func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceDa input.BlockDeviceMappings = blockDeviceMappings } - log.Printf("[DEBUG] Creating Auto Scaling Launch Configuration: %s", input) // IAM profiles can take ~10 seconds to propagate in AWS: // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console _, err = tfresource.RetryWhen(ctx, propagationTimeout, func() (interface{}, error) { - return autoscalingconn.CreateLaunchConfigurationWithContext(ctx, &input) + return autoscalingconn.CreateLaunchConfiguration(ctx, &input) }, func(err error) (bool, error) { if tfawserr.ErrMessageContains(err, errCodeValidationError, "Invalid IamInstanceProfile") || @@ -425,10 +426,10 @@ func resourceLaunchConfigurationCreate(ctx context.Context, d *schema.ResourceDa func resourceLaunchConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - autoscalingconn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + autoscalingconn := meta.(*conns.AWSClient).AutoScalingClient(ctx) ec2conn := meta.(*conns.AWSClient).EC2Conn(ctx) - lc, err := FindLaunchConfigurationByName(ctx, autoscalingconn, d.Id()) + lc, err := findLaunchConfigurationByName(ctx, autoscalingconn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Auto Scaling Launch Configuration %s not found, removing from state", d.Id()) @@ -460,11 +461,11 @@ func resourceLaunchConfigurationRead(ctx context.Context, d *schema.ResourceData d.Set("metadata_options", nil) } d.Set("name", lc.LaunchConfigurationName) - d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(lc.LaunchConfigurationName))) + d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(lc.LaunchConfigurationName))) d.Set("placement_tenancy", lc.PlacementTenancy) - d.Set("security_groups", aws.StringValueSlice(lc.SecurityGroups)) + d.Set("security_groups", lc.SecurityGroups) d.Set("spot_price", lc.SpotPrice) - if v := aws.StringValue(lc.UserData); v != "" { + if v := aws.ToString(lc.UserData); v != "" { if _, ok := d.GetOk("user_data_base64"); ok { d.Set("user_data_base64", v) } else { @@ -512,16 +513,15 @@ func resourceLaunchConfigurationRead(ctx context.Context, d *schema.ResourceData func resourceLaunchConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) log.Printf("[DEBUG] Deleting Auto Scaling Launch Configuration: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, + _, err := tfresource.RetryWhenIsA[*awstypes.ResourceInUseFault](ctx, propagationTimeout, func() (interface{}, error) { - return conn.DeleteLaunchConfigurationWithContext(ctx, &autoscaling.DeleteLaunchConfigurationInput{ + return conn.DeleteLaunchConfiguration(ctx, &autoscaling.DeleteLaunchConfigurationInput{ LaunchConfigurationName: aws.String(d.Id()), }) - }, - autoscaling.ErrCodeResourceInUseFault) + }) if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { return diags @@ -534,13 +534,9 @@ func resourceLaunchConfigurationDelete(ctx context.Context, d *schema.ResourceDa return diags } -func expandBlockDeviceMappingForEBSBlockDevice(tfMap map[string]interface{}) *autoscaling.BlockDeviceMapping { - if tfMap == nil { - return 
nil - } - - apiObject := &autoscaling.BlockDeviceMapping{ - Ebs: &autoscaling.Ebs{}, +func expandBlockDeviceMappingForEBSBlockDevice(tfMap map[string]interface{}) awstypes.BlockDeviceMapping { + apiObject := awstypes.BlockDeviceMapping{ + Ebs: &awstypes.Ebs{}, } if v, ok := tfMap["device_name"].(string); ok && v != "" { @@ -558,7 +554,7 @@ func expandBlockDeviceMappingForEBSBlockDevice(tfMap map[string]interface{}) *au } if v, ok := tfMap["iops"].(int); ok && v != 0 { - apiObject.Ebs.Iops = aws.Int64(int64(v)) + apiObject.Ebs.Iops = aws.Int32(int32(v)) } if v, ok := tfMap["snapshot_id"].(string); ok && v != "" { @@ -566,11 +562,11 @@ func expandBlockDeviceMappingForEBSBlockDevice(tfMap map[string]interface{}) *au } if v, ok := tfMap["throughput"].(int); ok && v != 0 { - apiObject.Ebs.Throughput = aws.Int64(int64(v)) + apiObject.Ebs.Throughput = aws.Int32(int32(v)) } if v, ok := tfMap["volume_size"].(int); ok && v != 0 { - apiObject.Ebs.VolumeSize = aws.Int64(int64(v)) + apiObject.Ebs.VolumeSize = aws.Int32(int32(v)) } if v, ok := tfMap["volume_type"].(string); ok && v != "" { @@ -580,12 +576,8 @@ func expandBlockDeviceMappingForEBSBlockDevice(tfMap map[string]interface{}) *au return apiObject } -func expandBlockDeviceMappingForEphemeralBlockDevice(tfMap map[string]interface{}) *autoscaling.BlockDeviceMapping { - if tfMap == nil { - return nil - } - - apiObject := &autoscaling.BlockDeviceMapping{} +func expandBlockDeviceMappingForEphemeralBlockDevice(tfMap map[string]interface{}) awstypes.BlockDeviceMapping { + apiObject := awstypes.BlockDeviceMapping{} if v, ok := tfMap["device_name"].(string); ok && v != "" { apiObject.DeviceName = aws.String(v) @@ -602,13 +594,9 @@ func expandBlockDeviceMappingForEphemeralBlockDevice(tfMap map[string]interface{ return apiObject } -func expandBlockDeviceMappingForRootBlockDevice(tfMap map[string]interface{}) *autoscaling.BlockDeviceMapping { - if tfMap == nil { - return nil - } - - apiObject := &autoscaling.BlockDeviceMapping{ - Ebs: &autoscaling.Ebs{}, +func expandBlockDeviceMappingForRootBlockDevice(tfMap map[string]interface{}) awstypes.BlockDeviceMapping { + apiObject := awstypes.BlockDeviceMapping{ + Ebs: &awstypes.Ebs{}, } if v, ok := tfMap["delete_on_termination"].(bool); ok { @@ -620,15 +608,15 @@ func expandBlockDeviceMappingForRootBlockDevice(tfMap map[string]interface{}) *a } if v, ok := tfMap["iops"].(int); ok && v != 0 { - apiObject.Ebs.Iops = aws.Int64(int64(v)) + apiObject.Ebs.Iops = aws.Int32(int32(v)) } if v, ok := tfMap["throughput"].(int); ok && v != 0 { - apiObject.Ebs.Throughput = aws.Int64(int64(v)) + apiObject.Ebs.Throughput = aws.Int32(int32(v)) } if v, ok := tfMap["volume_size"].(int); ok && v != 0 { - apiObject.Ebs.VolumeSize = aws.Int64(int64(v)) + apiObject.Ebs.VolumeSize = aws.Int32(int32(v)) } if v, ok := tfMap["volume_type"].(string); ok && v != "" { @@ -638,12 +626,12 @@ func expandBlockDeviceMappingForRootBlockDevice(tfMap map[string]interface{}) *a return apiObject } -func expandBlockDeviceMappings(tfList []interface{}, fn func(map[string]interface{}) *autoscaling.BlockDeviceMapping) []*autoscaling.BlockDeviceMapping { +func expandBlockDeviceMappings(tfList []interface{}, fn func(map[string]interface{}) awstypes.BlockDeviceMapping) []awstypes.BlockDeviceMapping { if len(tfList) == 0 { return nil } - var apiObjects []*autoscaling.BlockDeviceMapping + var apiObjects []awstypes.BlockDeviceMapping for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -653,18 +641,13 @@ func 
expandBlockDeviceMappings(tfList []interface{}, fn func(map[string]interfac } apiObject := fn(tfMap) - - if apiObject == nil { - continue - } - apiObjects = append(apiObjects, apiObject) } return apiObjects } -func flattenBlockDeviceMappings(apiObjects []*autoscaling.BlockDeviceMapping, rootDeviceName string, configuredEBSBlockDevices map[string]map[string]interface{}) ([]interface{}, []interface{}, []interface{}) { +func flattenBlockDeviceMappings(apiObjects []awstypes.BlockDeviceMapping, rootDeviceName string, configuredEBSBlockDevices map[string]map[string]interface{}) ([]interface{}, []interface{}, []interface{}) { if len(apiObjects) == 0 { return nil, nil, nil } @@ -672,14 +655,10 @@ func flattenBlockDeviceMappings(apiObjects []*autoscaling.BlockDeviceMapping, ro var tfListEBSBlockDevice, tfListEphemeralBlockDevice, tfListRootBlockDevice []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfMap := map[string]interface{}{} if v := apiObject.NoDevice; v != nil { - if v, ok := configuredEBSBlockDevices[aws.StringValue(apiObject.DeviceName)]; ok { + if v, ok := configuredEBSBlockDevices[aws.ToString(apiObject.DeviceName)]; ok { tfMap["delete_on_termination"] = v["delete_on_termination"].(bool) } else { // Keep existing value in place to avoid spurious diff. @@ -687,44 +666,44 @@ func flattenBlockDeviceMappings(apiObjects []*autoscaling.BlockDeviceMapping, ro } } else if v := apiObject.Ebs; v != nil { if v := v.DeleteOnTermination; v != nil { - tfMap["delete_on_termination"] = aws.BoolValue(v) + tfMap["delete_on_termination"] = aws.ToBool(v) } } if v := apiObject.Ebs; v != nil { if v := v.Encrypted; v != nil { - tfMap["encrypted"] = aws.BoolValue(v) + tfMap["encrypted"] = aws.ToBool(v) } if v := v.Iops; v != nil { - tfMap["iops"] = aws.Int64Value(v) + tfMap["iops"] = aws.ToInt32(v) } if v := v.Throughput; v != nil { - tfMap["throughput"] = aws.Int64Value(v) + tfMap["throughput"] = aws.ToInt32(v) } if v := v.VolumeSize; v != nil { - tfMap["volume_size"] = aws.Int64Value(v) + tfMap["volume_size"] = aws.ToInt32(v) } if v := v.VolumeType; v != nil { - tfMap["volume_type"] = aws.StringValue(v) + tfMap["volume_type"] = aws.ToString(v) } } - if v := apiObject.DeviceName; aws.StringValue(v) == rootDeviceName { + if v := apiObject.DeviceName; aws.ToString(v) == rootDeviceName { tfListRootBlockDevice = append(tfListRootBlockDevice, tfMap) continue } if v := apiObject.DeviceName; v != nil { - tfMap["device_name"] = aws.StringValue(v) + tfMap["device_name"] = aws.ToString(v) } if v := apiObject.VirtualName; v != nil { - tfMap["virtual_name"] = aws.StringValue(v) + tfMap["virtual_name"] = aws.ToString(v) tfListEphemeralBlockDevice = append(tfListEphemeralBlockDevice, tfMap) @@ -732,12 +711,12 @@ func flattenBlockDeviceMappings(apiObjects []*autoscaling.BlockDeviceMapping, ro } if v := apiObject.NoDevice; v != nil { - tfMap["no_device"] = aws.BoolValue(v) + tfMap["no_device"] = aws.ToBool(v) } if v := apiObject.Ebs; v != nil { if v := v.SnapshotId; v != nil { - tfMap["snapshot_id"] = aws.StringValue(v) + tfMap["snapshot_id"] = aws.ToString(v) } } @@ -747,23 +726,23 @@ func flattenBlockDeviceMappings(apiObjects []*autoscaling.BlockDeviceMapping, ro return tfListEBSBlockDevice, tfListEphemeralBlockDevice, tfListRootBlockDevice } -func expandInstanceMetadataOptions(tfMap map[string]interface{}) *autoscaling.InstanceMetadataOptions { +func expandInstanceMetadataOptions(tfMap map[string]interface{}) *awstypes.InstanceMetadataOptions { if tfMap == nil { return nil } - 
apiObject := &autoscaling.InstanceMetadataOptions{} + apiObject := &awstypes.InstanceMetadataOptions{} if v, ok := tfMap["http_endpoint"].(string); ok && v != "" { - apiObject.HttpEndpoint = aws.String(v) + apiObject.HttpEndpoint = awstypes.InstanceMetadataEndpointState(v) - if v == autoscaling.InstanceMetadataEndpointStateEnabled { + if v := awstypes.InstanceMetadataEndpointState(v); v == awstypes.InstanceMetadataEndpointStateEnabled { if v, ok := tfMap["http_tokens"].(string); ok && v != "" { - apiObject.HttpTokens = aws.String(v) + apiObject.HttpTokens = awstypes.InstanceMetadataHttpTokensState(v) } if v, ok := tfMap["http_put_response_hop_limit"].(int); ok && v != 0 { - apiObject.HttpPutResponseHopLimit = aws.Int64(int64(v)) + apiObject.HttpPutResponseHopLimit = aws.Int32(int32(v)) } } } @@ -771,24 +750,20 @@ func expandInstanceMetadataOptions(tfMap map[string]interface{}) *autoscaling.In return apiObject } -func flattenInstanceMetadataOptions(apiObject *autoscaling.InstanceMetadataOptions) map[string]interface{} { +func flattenInstanceMetadataOptions(apiObject *awstypes.InstanceMetadataOptions) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.HttpEndpoint; v != nil { - tfMap["http_endpoint"] = aws.StringValue(v) - } + tfMap["http_endpoint"] = string(apiObject.HttpEndpoint) if v := apiObject.HttpPutResponseHopLimit; v != nil { - tfMap["http_put_response_hop_limit"] = aws.Int64Value(v) + tfMap["http_put_response_hop_limit"] = aws.ToInt32(v) } - if v := apiObject.HttpTokens; v != nil { - tfMap["http_tokens"] = aws.StringValue(v) - } + tfMap["http_tokens"] = string(apiObject.HttpTokens) return tfMap } @@ -806,53 +781,37 @@ func userDataHashSum(userData string) string { return hex.EncodeToString(hash[:]) } -func findLaunchConfiguration(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeLaunchConfigurationsInput) (*autoscaling.LaunchConfiguration, error) { +func findLaunchConfiguration(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeLaunchConfigurationsInput) (*awstypes.LaunchConfiguration, error) { output, err := findLaunchConfigurations(ctx, conn, input) if err != nil { return nil, err } - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output[0], nil + return tfresource.AssertSingleValueResult(output) } -func findLaunchConfigurations(ctx context.Context, conn *autoscaling.AutoScaling, input *autoscaling.DescribeLaunchConfigurationsInput) ([]*autoscaling.LaunchConfiguration, error) { - var output []*autoscaling.LaunchConfiguration +func findLaunchConfigurations(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeLaunchConfigurationsInput) ([]awstypes.LaunchConfiguration, error) { + var output []awstypes.LaunchConfiguration - err := conn.DescribeLaunchConfigurationsPagesWithContext(ctx, input, func(page *autoscaling.DescribeLaunchConfigurationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := autoscaling.NewDescribeLaunchConfigurationsPaginator(conn, input) - for _, v := range page.LaunchConfigurations { - if v == nil { - continue - } + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - output = append(output, v) + if err != nil { + return nil, err } - return !lastPage - }) - - if err != nil { - return nil, err + output = append(output, 
page.LaunchConfigurations...) } return output, nil } -func FindLaunchConfigurationByName(ctx context.Context, conn *autoscaling.AutoScaling, name string) (*autoscaling.LaunchConfiguration, error) { +func findLaunchConfigurationByName(ctx context.Context, conn *autoscaling.Client, name string) (*awstypes.LaunchConfiguration, error) { input := &autoscaling.DescribeLaunchConfigurationsInput{ - LaunchConfigurationNames: aws.StringSlice([]string{name}), + LaunchConfigurationNames: []string{name}, } output, err := findLaunchConfiguration(ctx, conn, input) @@ -862,7 +821,7 @@ func FindLaunchConfigurationByName(ctx context.Context, conn *autoscaling.AutoSc } // Eventual consistency check. - if aws.StringValue(output.LaunchConfigurationName) != name { + if aws.ToString(output.LaunchConfigurationName) != name { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -879,11 +838,11 @@ func findImageRootDeviceName(ctx context.Context, conn *ec2.EC2, imageID string) } // Instance store backed AMIs do not provide a root device name. - if aws.StringValue(image.RootDeviceType) == ec2.DeviceTypeInstanceStore { + if aws.ToString(image.RootDeviceType) == ec2.DeviceTypeInstanceStore { return "", nil } - rootDeviceName := aws.StringValue(image.RootDeviceName) + rootDeviceName := aws.ToString(image.RootDeviceName) // Some AMIs have a RootDeviceName like "/dev/sda1" that does not appear as a // DeviceName in the BlockDeviceMapping list (which will instead have @@ -899,13 +858,13 @@ func findImageRootDeviceName(ctx context.Context, conn *ec2.EC2, imageID string) rootDeviceInBlockDeviceMappings := false for _, v := range image.BlockDeviceMappings { - if aws.StringValue(v.DeviceName) == rootDeviceName { + if aws.ToString(v.DeviceName) == rootDeviceName { rootDeviceInBlockDeviceMappings = true } } if !rootDeviceInBlockDeviceMappings && len(image.BlockDeviceMappings) > 0 { - rootDeviceName = aws.StringValue(image.BlockDeviceMappings[0].DeviceName) + rootDeviceName = aws.ToString(image.BlockDeviceMappings[0].DeviceName) } if rootDeviceName == "" { diff --git a/internal/service/autoscaling/launch_configuration_data_source.go b/internal/service/autoscaling/launch_configuration_data_source.go index 14e3def9785..d167b070d57 100644 --- a/internal/service/autoscaling/launch_configuration_data_source.go +++ b/internal/service/autoscaling/launch_configuration_data_source.go @@ -6,15 +6,14 @@ package autoscaling import ( "context" - "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" ) -// @SDKDataSource("aws_launch_configuration") -func DataSourceLaunchConfiguration() *schema.Resource { +// @SDKDataSource("aws_launch_configuration", name="Launch Configuration") +func dataSourceLaunchConfiguration() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLaunchConfigurationRead, @@ -190,11 +189,11 @@ func DataSourceLaunchConfiguration() *schema.Resource { func dataSourceLaunchConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - autoscalingconn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + autoscalingconn := meta.(*conns.AWSClient).AutoScalingClient(ctx) ec2conn := meta.(*conns.AWSClient).EC2Conn(ctx) name := d.Get("name").(string) - lc, err := FindLaunchConfigurationByName(ctx, autoscalingconn, name) + lc, 
err := findLaunchConfigurationByName(ctx, autoscalingconn, name) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Auto Scaling Launch Configuration (%s): %s", name, err) @@ -223,7 +222,7 @@ func dataSourceLaunchConfigurationRead(ctx context.Context, d *schema.ResourceDa } d.Set("name", lc.LaunchConfigurationName) d.Set("placement_tenancy", lc.PlacementTenancy) - d.Set("security_groups", aws.StringValueSlice(lc.SecurityGroups)) + d.Set("security_groups", lc.SecurityGroups) d.Set("spot_price", lc.SpotPrice) d.Set("user_data", lc.UserData) diff --git a/internal/service/autoscaling/launch_configuration_test.go b/internal/service/autoscaling/launch_configuration_test.go index 8581d5607b7..2d70df46842 100644 --- a/internal/service/autoscaling/launch_configuration_test.go +++ b/internal/service/autoscaling/launch_configuration_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go/service/ec2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -25,7 +25,7 @@ import ( func TestAccAutoScalingLaunchConfiguration_basic(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -71,7 +71,7 @@ func TestAccAutoScalingLaunchConfiguration_basic(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -95,7 +95,7 @@ func TestAccAutoScalingLaunchConfiguration_disappears(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_Name_generated(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ @@ -123,7 +123,7 @@ func TestAccAutoScalingLaunchConfiguration_Name_generated(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ @@ -151,7 +151,7 @@ func TestAccAutoScalingLaunchConfiguration_namePrefix(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_withBlockDevices(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -189,36 +189,10 @@ func TestAccAutoScalingLaunchConfiguration_withBlockDevices(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAutoScalingLaunchConfiguration_withInstanceStoreAMI(t *testing.T) { - ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_launch_configuration.test" - - 
resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLaunchConfigurationDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccLaunchConfigurationConfig_instanceStoreAMI(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckLaunchConfigurationExists(ctx, resourceName, &conf), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, }, }, }) @@ -227,7 +201,7 @@ func TestAccAutoScalingLaunchConfiguration_withInstanceStoreAMI(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_RootBlockDevice_amiDisappears(t *testing.T) { ctx := acctest.Context(t) var ami ec2.Image - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) amiCopyResourceName := "aws_ami_copy.test" resourceName := "aws_launch_configuration.test" @@ -259,7 +233,7 @@ func TestAccAutoScalingLaunchConfiguration_RootBlockDevice_amiDisappears(t *test func TestAccAutoScalingLaunchConfiguration_RootBlockDevice_volumeSize(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -296,7 +270,7 @@ func TestAccAutoScalingLaunchConfiguration_RootBlockDevice_volumeSize(t *testing func TestAccAutoScalingLaunchConfiguration_encryptedRootBlockDevice(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -329,7 +303,7 @@ func TestAccAutoScalingLaunchConfiguration_encryptedRootBlockDevice(t *testing.T func TestAccAutoScalingLaunchConfiguration_withSpotPrice(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -357,7 +331,7 @@ func TestAccAutoScalingLaunchConfiguration_withSpotPrice(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_withIAMProfile(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -385,7 +359,7 @@ func TestAccAutoScalingLaunchConfiguration_withIAMProfile(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_withGP3(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -425,7 +399,7 @@ func TestAccAutoScalingLaunchConfiguration_withGP3(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_encryptedEBSBlockDevice(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ 
-480,7 +454,7 @@ func TestAccAutoScalingLaunchConfiguration_encryptedEBSBlockDevice(t *testing.T) func TestAccAutoScalingLaunchConfiguration_metadataOptions(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -511,7 +485,7 @@ func TestAccAutoScalingLaunchConfiguration_metadataOptions(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_EBS_noDevice(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -543,7 +517,7 @@ func TestAccAutoScalingLaunchConfiguration_EBS_noDevice(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_userData(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration + var conf awstypes.LaunchConfiguration rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" @@ -578,8 +552,8 @@ func TestAccAutoScalingLaunchConfiguration_userData(t *testing.T) { func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetFalseConfigNull(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration - var group autoscaling.Group + var conf awstypes.LaunchConfiguration + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" groupResourceName := "aws_autoscaling_group.test" @@ -610,8 +584,8 @@ func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetFalseC func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetFalseConfigFalse(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration - var group autoscaling.Group + var conf awstypes.LaunchConfiguration + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" groupResourceName := "aws_autoscaling_group.test" @@ -642,8 +616,8 @@ func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetFalseC func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetFalseConfigTrue(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration - var group autoscaling.Group + var conf awstypes.LaunchConfiguration + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" groupResourceName := "aws_autoscaling_group.test" @@ -674,8 +648,8 @@ func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetFalseC func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetTrueConfigNull(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration - var group autoscaling.Group + var conf awstypes.LaunchConfiguration + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" groupResourceName := "aws_autoscaling_group.test" @@ -706,8 +680,8 @@ func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetTrueCo func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetTrueConfigFalse(t *testing.T) { ctx := acctest.Context(t) - var 
conf autoscaling.LaunchConfiguration - var group autoscaling.Group + var conf awstypes.LaunchConfiguration + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" groupResourceName := "aws_autoscaling_group.test" @@ -738,8 +712,8 @@ func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetTrueCo func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetTrueConfigTrue(t *testing.T) { ctx := acctest.Context(t) - var conf autoscaling.LaunchConfiguration - var group autoscaling.Group + var conf awstypes.LaunchConfiguration + var group awstypes.AutoScalingGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_launch_configuration.test" groupResourceName := "aws_autoscaling_group.test" @@ -770,7 +744,7 @@ func TestAccAutoScalingLaunchConfiguration_AssociatePublicIPAddress_subnetTrueCo func testAccCheckLaunchConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_launch_configuration" { @@ -794,18 +768,14 @@ func testAccCheckLaunchConfigurationDestroy(ctx context.Context) resource.TestCh } } -func testAccCheckLaunchConfigurationExists(ctx context.Context, n string, v *autoscaling.LaunchConfiguration) resource.TestCheckFunc { +func testAccCheckLaunchConfigurationExists(ctx context.Context, n string, v *awstypes.LaunchConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Auto Scaling Launch Configuration ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) output, err := tfautoscaling.FindLaunchConfigurationByName(ctx, conn, rs.Primary.ID) @@ -826,10 +796,6 @@ func testAccCheckAMIExists(ctx context.Context, n string, v *ec2.Image) resource return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EC2 AMI ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) output, err := tfec2.FindImageByID(ctx, conn, rs.Primary.ID) @@ -844,18 +810,18 @@ func testAccCheckAMIExists(ctx context.Context, n string, v *ec2.Image) resource } } -func testAccCheckInstanceHasPublicIPAddress(ctx context.Context, group *autoscaling.Group, idx int, expected bool) resource.TestCheckFunc { +func testAccCheckInstanceHasPublicIPAddress(ctx context.Context, group *awstypes.AutoScalingGroup, idx int, expected bool) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) - instanceID := aws.StringValue(group.Instances[idx].InstanceId) + instanceID := aws.ToString(group.Instances[idx].InstanceId) instance, err := tfec2.FindInstanceByID(ctx, conn, instanceID) if err != nil { return err } - hasPublicIPAddress := aws.StringValue(instance.PublicIpAddress) != "" + hasPublicIPAddress := aws.ToString(instance.PublicIpAddress) != "" if hasPublicIPAddress != expected { return fmt.Errorf("%s has public IP address; got %t, expected %t", instanceID, hasPublicIPAddress, expected) @@ -926,40 +892,6 @@ resource 
"aws_launch_configuration" "test" { `, rName)) } -// testAccLatestAmazonLinuxPVInstanceStoreAMIConfig returns the configuration for a data source that -// describes the latest Amazon Linux AMI using PV virtualization and an instance store root device. -// The data source is named 'amzn-ami-minimal-pv-instance-store'. -func testAccLatestAmazonLinuxPVInstanceStoreAMIConfig() string { - return ` -data "aws_ami" "amzn-ami-minimal-pv-instance-store" { - most_recent = true - owners = ["amazon"] - - filter { - name = "name" - values = ["amzn-ami-minimal-pv-*"] - } - - filter { - name = "root-device-type" - values = ["instance-store"] - } -} -` -} - -func testAccLaunchConfigurationConfig_instanceStoreAMI(rName string) string { - return acctest.ConfigCompose(testAccLatestAmazonLinuxPVInstanceStoreAMIConfig(), fmt.Sprintf(` -resource "aws_launch_configuration" "test" { - name = %[1]q - image_id = data.aws_ami.amzn-ami-minimal-pv-instance-store.id - - # When the instance type is updated, the new type must support ephemeral storage. - instance_type = "m1.small" -} -`, rName)) -} - func testAccLaunchConfigurationConfig_cofingRootBlockDeviceCopiedAMI(rName string) string { return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), fmt.Sprintf(` data "aws_region" "current" {} diff --git a/internal/service/autoscaling/lifecycle_hook.go b/internal/service/autoscaling/lifecycle_hook.go index 80df04ec4b0..a717f8a768f 100644 --- a/internal/service/autoscaling/lifecycle_hook.go +++ b/internal/service/autoscaling/lifecycle_hook.go @@ -11,21 +11,24 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -// @SDKResource("aws_autoscaling_lifecycle_hook") -func ResourceLifecycleHook() *schema.Resource { +// @SDKResource("aws_autoscaling_lifecycle_hook", name="Lifecycle Hook") +func resourceLifecycleHook() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLifecycleHookPut, ReadWithoutTimeout: resourceLifecycleHookRead, @@ -42,10 +45,10 @@ func ResourceLifecycleHook() *schema.Resource { Required: true, }, "default_result": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(lifecycleHookDefaultResult_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[lifecycleHookDefaultResult](), }, "heartbeat_timeout": { Type: schema.TypeInt, @@ -53,9 +56,9 @@ func ResourceLifecycleHook() *schema.Resource { ValidateFunc: validation.IntBetween(30, 7200), }, "lifecycle_transition": { - Type: 
schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(lifecycleHookLifecycleTransition_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[lifecycleHookLifecycleTransition](), }, "name": { Type: schema.TypeString, @@ -87,7 +90,7 @@ func ResourceLifecycleHook() *schema.Resource { func resourceLifecycleHookPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) name := d.Get("name").(string) input := &autoscaling.PutLifecycleHookInput{ @@ -100,7 +103,7 @@ func resourceLifecycleHookPut(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("heartbeat_timeout"); ok { - input.HeartbeatTimeout = aws.Int64(int64(v.(int))) + input.HeartbeatTimeout = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("lifecycle_transition"); ok { @@ -121,7 +124,7 @@ func resourceLifecycleHookPut(ctx context.Context, d *schema.ResourceData, meta _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 5*time.Minute, func() (interface{}, error) { - return conn.PutLifecycleHookWithContext(ctx, input) + return conn.PutLifecycleHook(ctx, input) }, errCodeValidationError, "Unable to publish test message to notification target") @@ -136,9 +139,9 @@ func resourceLifecycleHookPut(ctx context.Context, d *schema.ResourceData, meta func resourceLifecycleHookRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) - p, err := FindLifecycleHook(ctx, conn, d.Get("autoscaling_group_name").(string), d.Id()) + p, err := findLifecycleHookByTwoPartKey(ctx, conn, d.Get("autoscaling_group_name").(string), d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Auto Scaling Lifecycle Hook %s not found, removing from state", d.Id()) @@ -163,15 +166,15 @@ func resourceLifecycleHookRead(ctx context.Context, d *schema.ResourceData, meta func resourceLifecycleHookDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) log.Printf("[INFO] Deleting Auto Scaling Lifecycle Hook: %s", d.Id()) - _, err := conn.DeleteLifecycleHookWithContext(ctx, &autoscaling.DeleteLifecycleHookInput{ + _, err := conn.DeleteLifecycleHook(ctx, &autoscaling.DeleteLifecycleHookInput{ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), LifecycleHookName: aws.String(d.Id()), }) - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + if tfawserr.ErrMessageContains(err, errCodeValidationError, "No Lifecycle Hook found") { return diags } @@ -182,13 +185,13 @@ func resourceLifecycleHookDelete(ctx context.Context, d *schema.ResourceData, me return diags } -func FindLifecycleHook(ctx context.Context, conn *autoscaling.AutoScaling, asgName, hookName string) (*autoscaling.LifecycleHook, error) { +func findLifecycleHookByTwoPartKey(ctx context.Context, conn *autoscaling.Client, asgName, hookName string) (*awstypes.LifecycleHook, error) { input := &autoscaling.DescribeLifecycleHooksInput{ AutoScalingGroupName: aws.String(asgName), - LifecycleHookNames: aws.StringSlice([]string{hookName}), + LifecycleHookNames: []string{hookName}, } 
- output, err := conn.DescribeLifecycleHooksWithContext(ctx, input) + output, err := conn.DescribeLifecycleHooks(ctx, input) if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { return nil, &retry.NotFoundError{ @@ -205,8 +208,8 @@ func FindLifecycleHook(ctx context.Context, conn *autoscaling.AutoScaling, asgNa return nil, tfresource.NewEmptyResultError(input) } - for _, v := range output.LifecycleHooks { - if aws.StringValue(v.LifecycleHookName) == hookName { + for _, v := range tfslices.ToPointers(output.LifecycleHooks) { + if aws.ToString(v.LifecycleHookName) == hookName { return v, nil } } diff --git a/internal/service/autoscaling/lifecycle_hook_test.go b/internal/service/autoscaling/lifecycle_hook_test.go index 9f452ca468c..e76791ba9d2 100644 --- a/internal/service/autoscaling/lifecycle_hook_test.go +++ b/internal/service/autoscaling/lifecycle_hook_test.go @@ -101,13 +101,9 @@ func testAccCheckLifecycleHookExists(ctx context.Context, n string) resource.Tes return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Auto Scaling Lifecycle Hook ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) - _, err := tfautoscaling.FindLifecycleHook(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) + _, err := tfautoscaling.FindLifecycleHookByTwoPartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) return err } @@ -115,14 +111,14 @@ func testAccCheckLifecycleHookExists(ctx context.Context, n string) resource.Tes func testAccCheckLifecycleHookDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_lifecycle_hook" { continue } - _, err := tfautoscaling.FindLifecycleHook(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) + _, err := tfautoscaling.FindLifecycleHookByTwoPartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) if tfresource.NotFound(err) { continue diff --git a/internal/service/autoscaling/list_pages_gen.go b/internal/service/autoscaling/list_pages_gen.go deleted file mode 100644 index bc373278c5c..00000000000 --- a/internal/service/autoscaling/list_pages_gen.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by "internal/generate/listpages/main.go -ListOps=DescribeInstanceRefreshes,DescribeLoadBalancers,DescribeLoadBalancerTargetGroups,DescribeWarmPool"; DO NOT EDIT. 
- -package autoscaling - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" -) - -func describeInstanceRefreshesPages(ctx context.Context, conn autoscalingiface.AutoScalingAPI, input *autoscaling.DescribeInstanceRefreshesInput, fn func(*autoscaling.DescribeInstanceRefreshesOutput, bool) bool) error { - for { - output, err := conn.DescribeInstanceRefreshesWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} -func describeLoadBalancerTargetGroupsPages(ctx context.Context, conn autoscalingiface.AutoScalingAPI, input *autoscaling.DescribeLoadBalancerTargetGroupsInput, fn func(*autoscaling.DescribeLoadBalancerTargetGroupsOutput, bool) bool) error { - for { - output, err := conn.DescribeLoadBalancerTargetGroupsWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} -func describeLoadBalancersPages(ctx context.Context, conn autoscalingiface.AutoScalingAPI, input *autoscaling.DescribeLoadBalancersInput, fn func(*autoscaling.DescribeLoadBalancersOutput, bool) bool) error { - for { - output, err := conn.DescribeLoadBalancersWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} -func describeWarmPoolPages(ctx context.Context, conn autoscalingiface.AutoScalingAPI, input *autoscaling.DescribeWarmPoolInput, fn func(*autoscaling.DescribeWarmPoolOutput, bool) bool) error { - for { - output, err := conn.DescribeWarmPoolWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} diff --git a/internal/service/autoscaling/notification.go b/internal/service/autoscaling/notification.go index 618629834f0..baa0890aa30 100644 --- a/internal/service/autoscaling/notification.go +++ b/internal/service/autoscaling/notification.go @@ -5,20 +5,24 @@ package autoscaling import ( "context" - "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_autoscaling_notification") -func ResourceNotification() *schema.Resource { +// @SDKResource("aws_autoscaling_notification", name="Notification") +func resourceNotification() *schema.Resource { return 
&schema.Resource{ CreateWithoutTimeout: resourceNotificationCreate, ReadWithoutTimeout: resourceNotificationRead, @@ -47,88 +51,59 @@ func ResourceNotification() *schema.Resource { func resourceNotificationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) - gl := flex.ExpandStringSet(d.Get("group_names").(*schema.Set)) - nl := flex.ExpandStringSet(d.Get("notifications").(*schema.Set)) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) topic := d.Get("topic_arn").(string) - if err := addNotificationConfigToGroupsWithTopic(ctx, conn, gl, nl, topic); err != nil { - return sdkdiag.AppendErrorf(diags, "creating Autoscaling Group Notification (%s): %s", topic, err) + if err := addNotificationConfigToGroupsWithTopic(ctx, conn, flex.ExpandStringSet(d.Get("group_names").(*schema.Set)), flex.ExpandStringSet(d.Get("notifications").(*schema.Set)), topic); err != nil { + return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Notification (%s): %s", topic, err) } // ARNs are unique, and these notifications are per ARN, so we re-use the ARN - // here as the ID + // here as the ID. d.SetId(topic) + return append(diags, resourceNotificationRead(ctx, d, meta)...) } func resourceNotificationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) - gl := flex.ExpandStringSet(d.Get("group_names").(*schema.Set)) - - opts := &autoscaling.DescribeNotificationConfigurationsInput{ - AutoScalingGroupNames: gl, - } - - topic := d.Get("topic_arn").(string) - // Grab all applicable notification configurations for this Topic. - // Each NotificationType will have a record, so 1 Group with 3 Types results - // in 3 records, all with the same Group name - gRaw := make(map[string]bool) - nRaw := make(map[string]bool) - - i := 0 - err := conn.DescribeNotificationConfigurationsPagesWithContext(ctx, opts, func(resp *autoscaling.DescribeNotificationConfigurationsOutput, lastPage bool) bool { - if resp != nil { - i++ - log.Printf("[DEBUG] Paging DescribeNotificationConfigurations for (%s), page: %d", d.Id(), i) - } else { - log.Printf("[DEBUG] Paging finished for DescribeNotificationConfigurations (%s)", d.Id()) - return false - } + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) - for _, n := range resp.NotificationConfigurations { - if n == nil { - continue - } + notifications, err := findNotificationsByTwoPartKey(ctx, conn, flex.ExpandStringValueSet(d.Get("group_names").(*schema.Set)), d.Id()) - if aws.StringValue(n.TopicARN) == topic { - gRaw[aws.StringValue(n.AutoScalingGroupName)] = true - nRaw[aws.StringValue(n.NotificationType)] = true - } - } - return true // return false to stop paging - }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Autoscaling Group Notification (%s): %s", topic, err) + if err == nil && len(notifications) == 0 { + err = tfresource.NewEmptyResultError(nil) } - // Grab the keys here as the list of Groups - var gList []string - for k := range gRaw { - gList = append(gList, k) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] Auto Scaling Notification %s not found, removing from state", d.Id()) + d.SetId("") + return diags } - // Grab the keys here as the list of Types - var nList []string - for k := range nRaw { - nList = append(nList, k) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Auto 
Scaling Notification (%s): %s", d.Id(), err) } - if err := d.Set("group_names", gList); err != nil { - return sdkdiag.AppendErrorf(diags, "updating Autoscaling Group Notification (%s): %s", topic, err) - } - if err := d.Set("notifications", nList); err != nil { - return sdkdiag.AppendErrorf(diags, "updating Autoscaling Group Notification (%s): %s", topic, err) + // Grab all applicable notification configurations for this Topic. + // Each NotificationType will have a record, so 1 Group with 3 Types results + // in 3 records, all with the same Group name. + gRaw := make(map[string]struct{}) + nRaw := make(map[string]struct{}) + for _, n := range notifications { + gRaw[aws.ToString(n.AutoScalingGroupName)] = struct{}{} + nRaw[aws.ToString(n.NotificationType)] = struct{}{} } + d.Set("group_names", tfmaps.Keys(gRaw)) + d.Set("notifications", tfmaps.Keys(nRaw)) + return diags } func resourceNotificationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) // Notifications API call is a PUT, so we don't need to diff the list, just // push whatever it is and AWS sorts it out @@ -148,7 +123,7 @@ func resourceNotificationUpdate(ctx context.Context, d *schema.ResourceData, met topic := d.Get("topic_arn").(string) if err := removeNotificationConfigToGroupsWithTopic(ctx, conn, remove, topic); err != nil { - return sdkdiag.AppendErrorf(diags, "updating Autoscaling Group Notification (%s): %s", topic, err) + return sdkdiag.AppendErrorf(diags, "updating Auto Scaling Notification (%s): %s", topic, err) } var update []*string @@ -159,54 +134,90 @@ func resourceNotificationUpdate(ctx context.Context, d *schema.ResourceData, met } if err := addNotificationConfigToGroupsWithTopic(ctx, conn, update, nl, topic); err != nil { - return sdkdiag.AppendErrorf(diags, "updating Autoscaling Group Notification (%s): %s", topic, err) + return sdkdiag.AppendErrorf(diags, "updating Auto Scaling Notification (%s): %s", topic, err) } return append(diags, resourceNotificationRead(ctx, d, meta)...) 
} -func addNotificationConfigToGroupsWithTopic(ctx context.Context, conn *autoscaling.AutoScaling, groups []*string, nl []*string, topic string) error { +func resourceNotificationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) + + topic := d.Get("topic_arn").(string) + if err := removeNotificationConfigToGroupsWithTopic(ctx, conn, flex.ExpandStringSet(d.Get("group_names").(*schema.Set)), topic); err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Auto Scaling Notification (%s): %s", topic, err) + } + + return diags +} + +func addNotificationConfigToGroupsWithTopic(ctx context.Context, conn *autoscaling.Client, groups, notificationTypes []*string, topic string) error { for _, group := range groups { - opts := &autoscaling.PutNotificationConfigurationInput{ + input := &autoscaling.PutNotificationConfigurationInput{ AutoScalingGroupName: group, - NotificationTypes: nl, + NotificationTypes: aws.ToStringSlice(notificationTypes), TopicARN: aws.String(topic), } - _, err := conn.PutNotificationConfigurationWithContext(ctx, opts) + _, err := conn.PutNotificationConfiguration(ctx, input) if err != nil { - return fmt.Errorf("adding notifications for (%s): %w", aws.StringValue(group), err) + return err } } return nil } -func removeNotificationConfigToGroupsWithTopic(ctx context.Context, conn *autoscaling.AutoScaling, groups []*string, topic string) error { +func removeNotificationConfigToGroupsWithTopic(ctx context.Context, conn *autoscaling.Client, groups []*string, topic string) error { for _, group := range groups { - opts := &autoscaling.DeleteNotificationConfigurationInput{ + input := &autoscaling.DeleteNotificationConfigurationInput{ AutoScalingGroupName: group, TopicARN: aws.String(topic), } - _, err := conn.DeleteNotificationConfigurationWithContext(ctx, opts) + _, err := conn.DeleteNotificationConfiguration(ctx, input) + + if tfawserr.ErrMessageContains(err, errCodeValidationError, "doesn't exist") { + continue + } + if err != nil { - return fmt.Errorf("removing notifications for (%s): %w", aws.StringValue(group), err) + return err } } + return nil } -func resourceNotificationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) +func findNotificationsByTwoPartKey(ctx context.Context, conn *autoscaling.Client, groups []string, topic string) ([]awstypes.NotificationConfiguration, error) { + input := &autoscaling.DescribeNotificationConfigurationsInput{ + AutoScalingGroupNames: groups, + } - gl := flex.ExpandStringSet(d.Get("group_names").(*schema.Set)) + return findNotifications(ctx, conn, input, func(v *awstypes.NotificationConfiguration) bool { + return aws.ToString(v.TopicARN) == topic + }) +} - topic := d.Get("topic_arn").(string) - if err := removeNotificationConfigToGroupsWithTopic(ctx, conn, gl, topic); err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Autoscaling Group Notification (%s): %s", topic, err) +func findNotifications(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeNotificationConfigurationsInput, filter tfslices.Predicate[*awstypes.NotificationConfiguration]) ([]awstypes.NotificationConfiguration, error) { + var output []awstypes.NotificationConfiguration + + pages := autoscaling.NewDescribeNotificationConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := 
pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.NotificationConfigurations { + if filter(&v) { + output = append(output, v) + } + } } - return diags + + return output, nil } diff --git a/internal/service/autoscaling/notification_test.go b/internal/service/autoscaling/notification_test.go index 619b2ab471b..c5bbe863f0f 100644 --- a/internal/service/autoscaling/notification_test.go +++ b/internal/service/autoscaling/notification_test.go @@ -6,220 +6,182 @@ package autoscaling_test import ( "context" "fmt" - "strconv" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfautoscaling "github.com/hashicorp/terraform-provider-aws/internal/service/autoscaling" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccAutoScalingNotification_ASG_basic(t *testing.T) { +func TestAccAutoScalingNotification_basic(t *testing.T) { ctx := acctest.Context(t) - var asgn autoscaling.DescribeNotificationConfigurationsOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_autoscaling_notification.test" + groups := []string{rName} - rName := sdkacctest.RandString(5) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckNotificationDestroy(ctx, groups), + Steps: []resource.TestStep{ + { + Config: testAccNotificationConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckNotificationExists(ctx, resourceName, groups), + resource.TestCheckResourceAttr(resourceName, "group_names.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "group_names.*", rName), + resource.TestCheckResourceAttr(resourceName, "notifications.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_LAUNCH"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_TERMINATE"), + resource.TestCheckResourceAttrSet(resourceName, "topic_arn"), + ), + }, + }, + }) +} + +func TestAccAutoScalingNotification_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_autoscaling_notification.test" + groups := []string{rName} resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckASGNDestroy(ctx), + CheckDestroy: testAccCheckNotificationDestroy(ctx, groups), Steps: []resource.TestStep{ { Config: testAccNotificationConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckASGNotificationExists(ctx, "aws_autoscaling_notification.example", []string{"foobar1-terraform-test-" + rName}, &asgn), - testAccCheckASGNotificationAttributes("aws_autoscaling_notification.example", &asgn), + testAccCheckNotificationExists(ctx, resourceName, groups), + 
acctest.CheckResourceDisappears(ctx, acctest.Provider, tfautoscaling.ResourceNotification(), resourceName), ), + ExpectNonEmptyPlan: true, }, }, }) } -func TestAccAutoScalingNotification_ASG_update(t *testing.T) { +func TestAccAutoScalingNotification_update(t *testing.T) { ctx := acctest.Context(t) - var asgn autoscaling.DescribeNotificationConfigurationsOutput - - rName := sdkacctest.RandString(5) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_autoscaling_notification.test" + groups1 := []string{rName} + groups2 := []string{rName, rName + "-2"} resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckASGNDestroy(ctx), + CheckDestroy: testAccCheckNotificationDestroy(ctx, groups2), Steps: []resource.TestStep{ { Config: testAccNotificationConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckASGNotificationExists(ctx, "aws_autoscaling_notification.example", []string{"foobar1-terraform-test-" + rName}, &asgn), - testAccCheckASGNotificationAttributes("aws_autoscaling_notification.example", &asgn), + testAccCheckNotificationExists(ctx, resourceName, groups1), + resource.TestCheckResourceAttr(resourceName, "group_names.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "group_names.*", rName), + resource.TestCheckResourceAttr(resourceName, "notifications.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_LAUNCH"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_TERMINATE"), ), }, { Config: testAccNotificationConfig_update(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckASGNotificationExists(ctx, "aws_autoscaling_notification.example", []string{"foobar1-terraform-test-" + rName, "barfoo-terraform-test-" + rName}, &asgn), - testAccCheckASGNotificationAttributes("aws_autoscaling_notification.example", &asgn), + testAccCheckNotificationExists(ctx, resourceName, groups2), + resource.TestCheckResourceAttr(resourceName, "group_names.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "group_names.*", rName), + resource.TestCheckTypeSetElemAttr(resourceName, "group_names.*", rName+"-2"), + resource.TestCheckResourceAttr(resourceName, "notifications.#", "3"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_LAUNCH"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_TERMINATE"), + resource.TestCheckTypeSetElemAttr(resourceName, "notifications.*", "autoscaling:EC2_INSTANCE_LAUNCH_ERROR"), ), }, }, }) } -func TestAccAutoScalingNotification_ASG_pagination(t *testing.T) { +func TestAccAutoScalingNotification_paginated(t *testing.T) { ctx := acctest.Context(t) - var asgn autoscaling.DescribeNotificationConfigurationsOutput - - resourceName := "aws_autoscaling_notification.example" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_autoscaling_notification.test" + var groups []string + for i := 0; i < 20; i++ { + groups = append(groups, fmt.Sprintf("%s-%d", rName, i)) + } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: 
testAccCheckASGNDestroy(ctx), + CheckDestroy: testAccCheckNotificationDestroy(ctx, groups), Steps: []resource.TestStep{ { - Config: testAccNotificationConfig_pagination(), + Config: testAccNotificationConfig_paginated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckASGNotificationExists(ctx, resourceName, - []string{ - "foobar3-terraform-test-0", - "foobar3-terraform-test-1", - "foobar3-terraform-test-2", - "foobar3-terraform-test-3", - "foobar3-terraform-test-4", - "foobar3-terraform-test-5", - "foobar3-terraform-test-6", - "foobar3-terraform-test-7", - "foobar3-terraform-test-8", - "foobar3-terraform-test-9", - "foobar3-terraform-test-10", - "foobar3-terraform-test-11", - "foobar3-terraform-test-12", - "foobar3-terraform-test-13", - "foobar3-terraform-test-14", - "foobar3-terraform-test-15", - "foobar3-terraform-test-16", - "foobar3-terraform-test-17", - "foobar3-terraform-test-18", - "foobar3-terraform-test-19", - }, &asgn), - testAccCheckASGNotificationAttributes(resourceName, &asgn), + testAccCheckNotificationExists(ctx, resourceName, groups), + resource.TestCheckResourceAttr(resourceName, "group_names.#", "20"), + resource.TestCheckResourceAttr(resourceName, "notifications.#", "3"), ), }, }, }) } -func testAccCheckASGNotificationExists(ctx context.Context, n string, groups []string, asgn *autoscaling.DescribeNotificationConfigurationsOutput) resource.TestCheckFunc { +func testAccCheckNotificationExists(ctx context.Context, n string, groups []string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ASG Notification ID is set") - } + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) - opts := &autoscaling.DescribeNotificationConfigurationsInput{ - AutoScalingGroupNames: aws.StringSlice(groups), - MaxRecords: aws.Int64(100), - } + output, err := tfautoscaling.FindNotificationsByTwoPartKey(ctx, conn, groups, rs.Primary.ID) - resp, err := conn.DescribeNotificationConfigurationsWithContext(ctx, opts) - if err != nil { - return fmt.Errorf("Error describing notifications: %s", err) + if err == nil && len(output) == 0 { + err = tfresource.NewEmptyResultError(nil) } - *asgn = *resp - - return nil + return err } } -func testAccCheckASGNDestroy(ctx context.Context) resource.TestCheckFunc { +func testAccCheckNotificationDestroy(ctx context.Context, groups []string) resource.TestCheckFunc { return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_notification" { continue } - groups := []*string{aws.String("foobar1-terraform-test")} - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) - opts := &autoscaling.DescribeNotificationConfigurationsInput{ - AutoScalingGroupNames: groups, - } + output, err := tfautoscaling.FindNotificationsByTwoPartKey(ctx, conn, groups, rs.Primary.ID) - resp, err := conn.DescribeNotificationConfigurationsWithContext(ctx, opts) - if err != nil { - return fmt.Errorf("Error describing notifications") + if err == nil && len(output) == 0 { + err = tfresource.NewEmptyResultError(nil) } - if len(resp.NotificationConfigurations) != 0 { - return fmt.Errorf("Error finding notification descriptions") + if tfresource.NotFound(err) { + continue } - } - return nil - } -} 
- -func testAccCheckASGNotificationAttributes(n string, asgn *autoscaling.DescribeNotificationConfigurationsOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ASG Notification ID is set") - } - - if len(asgn.NotificationConfigurations) == 0 { - return fmt.Errorf("Error: no ASG Notifications found") - } - // build a unique list of groups, notification types - gRaw := make(map[string]bool) - nRaw := make(map[string]bool) - - for _, n := range asgn.NotificationConfigurations { - if *n.TopicARN == rs.Primary.Attributes["topic_arn"] { - gRaw[*n.AutoScalingGroupName] = true - nRaw[*n.NotificationType] = true + if err != nil { + return err } - } - // Grab the keys here as the list of Groups - var gList []string - for k := range gRaw { - gList = append(gList, k) - } - - // Grab the keys here as the list of Types - var nList []string - for k := range nRaw { - nList = append(nList, k) - } - - typeCount, _ := strconv.Atoi(rs.Primary.Attributes["notifications.#"]) - - if len(nList) != typeCount { - return fmt.Errorf("Error: Bad ASG Notification count, expected (%d), got (%d)", typeCount, len(nList)) - } - - groupCount, _ := strconv.Atoi(rs.Primary.Attributes["group_names.#"]) - - if len(gList) != groupCount { - return fmt.Errorf("Error: Bad ASG Group count, expected (%d), got (%d)", typeCount, len(gList)) + return fmt.Errorf("Auto Scaling Notification %s still exists", rs.Primary.ID) } return nil @@ -227,103 +189,42 @@ func testAccCheckASGNotificationAttributes(n string, asgn *autoscaling.DescribeN } func testAccNotificationConfig_basic(rName string) string { - return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), fmt.Sprintf(` -resource "aws_sns_topic" "topic_example" { - name = "user-updates-topic-%s" -} - -resource "aws_launch_configuration" "foobar" { - name = "foobarautoscaling-terraform-test-%s" - image_id = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" + return acctest.ConfigCompose(testAccGroupConfig_basic(rName), fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q } -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_autoscaling_group" "bar" { - availability_zones = [data.aws_availability_zones.available.names[1]] - name = "foobar1-terraform-test-%s" - max_size = 1 - min_size = 1 - health_check_grace_period = 100 - health_check_type = "ELB" - desired_capacity = 1 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = aws_launch_configuration.foobar.name -} - -resource "aws_autoscaling_notification" "example" { - group_names = [aws_autoscaling_group.bar.name] +resource "aws_autoscaling_notification" "test" { + group_names = [aws_autoscaling_group.test.name] notifications = [ "autoscaling:EC2_INSTANCE_LAUNCH", "autoscaling:EC2_INSTANCE_TERMINATE", ] - topic_arn = aws_sns_topic.topic_example.arn + topic_arn = aws_sns_topic.test.arn } -`, rName, rName, rName)) +`, rName)) } func testAccNotificationConfig_update(rName string) string { - return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), fmt.Sprintf(` -resource "aws_sns_topic" "topic_example" { - name = "user-updates-topic-%s" -} - -resource "aws_launch_configuration" "foobar" { - name = "foobarautoscaling-terraform-test-%s" - image_id = 
data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" + return acctest.ConfigCompose(testAccGroupConfig_basic(rName), fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q } -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } +resource "aws_autoscaling_group" "test2" { + availability_zones = [data.aws_availability_zones.available.names[1]] + max_size = 0 + min_size = 0 + name = "%[1]s-2" + launch_configuration = aws_launch_configuration.test.name } -resource "aws_autoscaling_group" "bar" { - availability_zones = [data.aws_availability_zones.available.names[1]] - name = "foobar1-terraform-test-%s" - max_size = 1 - min_size = 1 - health_check_grace_period = 100 - health_check_type = "ELB" - desired_capacity = 1 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = aws_launch_configuration.foobar.name -} - -resource "aws_autoscaling_group" "foo" { - availability_zones = [data.aws_availability_zones.available.names[2]] - name = "barfoo-terraform-test-%s" - max_size = 1 - min_size = 1 - health_check_grace_period = 200 - health_check_type = "ELB" - desired_capacity = 1 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = aws_launch_configuration.foobar.name -} - -resource "aws_autoscaling_notification" "example" { +resource "aws_autoscaling_notification" "test" { group_names = [ - aws_autoscaling_group.bar.name, - aws_autoscaling_group.foo.name, + aws_autoscaling_group.test.name, + aws_autoscaling_group.test2.name, ] notifications = [ @@ -332,53 +233,35 @@ resource "aws_autoscaling_notification" "example" { "autoscaling:EC2_INSTANCE_LAUNCH_ERROR", ] - topic_arn = aws_sns_topic.topic_example.arn + topic_arn = aws_sns_topic.test.arn } -`, rName, rName, rName, rName)) +`, rName)) } -func testAccNotificationConfig_pagination() string { - return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), ` -resource "aws_sns_topic" "user_updates" { - name = "user-updates-topic" +func testAccNotificationConfig_paginated(rName string) string { + return acctest.ConfigCompose(testAccGroupConfig_launchConfigurationBase(rName, "t2.micro"), fmt.Sprintf(` +resource "aws_sns_topic" "test" { + name = %[1]q } -resource "aws_launch_configuration" "foobar" { - image_id = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" -} - -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} +resource "aws_autoscaling_group" "test" { + count = 20 -resource "aws_autoscaling_group" "bar" { - availability_zones = [data.aws_availability_zones.available.names[1]] - count = 20 - name = "foobar3-terraform-test-${count.index}" - max_size = 1 - min_size = 0 - health_check_grace_period = 300 - health_check_type = "ELB" - desired_capacity = 0 - force_delete = true - termination_policies = ["OldestInstance"] - launch_configuration = aws_launch_configuration.foobar.name + availability_zones = [data.aws_availability_zones.available.names[0]] + max_size = 0 + min_size = 0 + name = "%[1]s-${count.index}" + launch_configuration = aws_launch_configuration.test.name } -resource "aws_autoscaling_notification" "example" { - group_names = aws_autoscaling_group.bar[*].name +resource "aws_autoscaling_notification" "test" { + group_names = aws_autoscaling_group.test[*].name notifications = [ 
"autoscaling:EC2_INSTANCE_LAUNCH", "autoscaling:EC2_INSTANCE_TERMINATE", "autoscaling:TEST_NOTIFICATION" ] - topic_arn = aws_sns_topic.user_updates.arn -}`) + topic_arn = aws_sns_topic.test.arn +}`, rName)) } diff --git a/internal/service/autoscaling/policy.go b/internal/service/autoscaling/policy.go index 88348077a81..fcd7391b9a8 100644 --- a/internal/service/autoscaling/policy.go +++ b/internal/service/autoscaling/policy.go @@ -11,453 +11,347 @@ import ( "strconv" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" ) -// @SDKResource("aws_autoscaling_policy") -func ResourcePolicy() *schema.Resource { +// @SDKResource("aws_autoscaling_policy", name="Policy") +func resourcePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePolicyCreate, ReadWithoutTimeout: resourcePolicyRead, UpdateWithoutTimeout: resourcePolicyUpdate, DeleteWithoutTimeout: resourcePolicyDelete, + Importer: &schema.ResourceImporter{ StateContext: resourcePolicyImport, }, - Schema: map[string]*schema.Schema{ - "adjustment_type": { - Type: schema.TypeString, - Optional: true, - }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "autoscaling_group_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "cooldown": { - Type: schema.TypeInt, - Optional: true, - }, - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "estimated_instance_warmup": { - Type: schema.TypeInt, - Optional: true, - }, - "metric_aggregation_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "min_adjustment_magnitude": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "policy_type": { - Type: schema.TypeString, - Optional: true, - Default: PolicyTypeSimpleScaling, // preserve AWS's default to make validation easier. 
- ValidateFunc: validation.StringInSlice(PolicyType_Values(), false), - }, - "predictive_scaling_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_capacity_breach_behavior": { - Type: schema.TypeString, - Optional: true, - Default: autoscaling.PredictiveScalingMaxCapacityBreachBehaviorHonorMaxCapacity, - ValidateFunc: validation.StringInSlice(autoscaling.PredictiveScalingMaxCapacityBreachBehavior_Values(), false), - }, - "max_capacity_buffer": { - Type: nullable.TypeNullableInt, - Optional: true, - ValidateFunc: nullable.ValidateTypeStringNullableIntBetween(0, 100), - }, - "metric_specification": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "customized_capacity_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.predefined_load_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_data_queries": func() *schema.Schema { - schema := customizedMetricDataQuerySchema() - return schema - }(), - }, - }, - }, - "customized_load_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.predefined_load_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_data_queries": func() *schema.Schema { - schema := customizedMetricDataQuerySchema() - return schema - }(), - }, - }, - }, - "customized_scaling_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.predefined_scaling_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_data_queries": func() *schema.Schema { - schema := customizedMetricDataQuerySchema() - return schema - }(), - }, - }, - }, - "predefined_load_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.customized_load_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "predefined_metric_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(autoscaling.PredefinedLoadMetricType_Values(), false), - }, - "resource_label": { - Type: schema.TypeString, - Optional: true, + SchemaFunc: func() map[string]*schema.Schema { + // All predictive scaling customized metrics shares same metric data query schema + customizedMetricDataQuerySchema := func() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1023), + }, + "id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "label": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2047), + }, + "metric_stat": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimensions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "metric_name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - "predefined_metric_pair_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "predefined_metric_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(autoscaling.PredefinedMetricPairType_Values(), false), - }, - "resource_label": { - Type: schema.TypeString, - Optional: true, - }, - }, + "stat": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 100), }, - }, - "predefined_scaling_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.customized_scaling_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "predefined_metric_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(autoscaling.PredefinedScalingMetricType_Values(), false), - }, - "resource_label": { - Type: schema.TypeString, - Optional: true, - }, - }, + "unit": { + Type: schema.TypeString, + Optional: true, }, }, - "target_value": { - Type: schema.TypeFloat, - Required: true, - }, }, }, - }, - "mode": { - Type: schema.TypeString, - Optional: true, - Default: autoscaling.PredictiveScalingModeForecastOnly, - ValidateFunc: validation.StringInSlice(autoscaling.PredictiveScalingMode_Values(), false), - }, - "scheduling_buffer_time": { - Type: nullable.TypeNullableInt, - Optional: true, - ValidateFunc: nullable.ValidateTypeStringNullableIntAtLeast(0), + "return_data": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, }, }, + } + } + + return map[string]*schema.Schema{ + "adjustment_type": { + Type: schema.TypeString, + Optional: true, }, - }, - "scaling_adjustment": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"step_adjustment"}, - }, - "step_adjustment": { - Type: schema.TypeSet, - Optional: true, - ConflictsWith: []string{"scaling_adjustment"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_interval_lower_bound": { - Type: schema.TypeString, - Optional: true, - }, - "metric_interval_upper_bound": { - Type: schema.TypeString, - Optional: true, - }, - "scaling_adjustment": { - Type: schema.TypeInt, - Required: true, - }, - }, + "arn": { + Type: schema.TypeString, + Computed: true, }, - Set: resourceScalingAdjustmentHash, - }, - "target_tracking_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "customized_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{"target_tracking_configuration.0.predefined_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_dimension": { - Type: schema.TypeList, - Optional: true, - ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, + "autoscaling_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cooldown": { + Type: schema.TypeInt, + Optional: true, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "estimated_instance_warmup": { + Type: schema.TypeInt, + Optional: true, + }, + "metric_aggregation_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "min_adjustment_magnitude": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy_type": { + Type: schema.TypeString, + Optional: true, + Default: policyTypeSimpleScaling, // preserve AWS's default to make validation easier. + ValidateDiagFunc: enum.Validate[policyType](), + }, + "predictive_scaling_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_capacity_breach_behavior": { + Type: schema.TypeString, + Optional: true, + Default: awstypes.PredictiveScalingMaxCapacityBreachBehaviorHonorMaxCapacity, + ValidateDiagFunc: enum.Validate[awstypes.PredictiveScalingMaxCapacityBreachBehavior](), + }, + "max_capacity_buffer": { + Type: nullable.TypeNullableInt, + Optional: true, + ValidateFunc: nullable.ValidateTypeStringNullableIntBetween(0, 100), + }, + "metric_specification": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "customized_capacity_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.predefined_load_metric_specification"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_data_queries": customizedMetricDataQuerySchema(), }, }, }, - }, - "metric_name": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, - }, - "metrics": { - Type: schema.TypeSet, - Optional: true, - ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metric_dimension", "target_tracking_configuration.0.customized_metric_specification.0.metric_name", "target_tracking_configuration.0.customized_metric_specification.0.namespace", "target_tracking_configuration.0.customized_metric_specification.0.statistic", "target_tracking_configuration.0.customized_metric_specification.0.unit"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 2047), + "customized_load_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.predefined_load_metric_specification"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_data_queries": customizedMetricDataQuerySchema(), }, - "id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 255), + }, + }, + "customized_scaling_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: 
[]string{"predictive_scaling_configuration.0.metric_specification.0.predefined_scaling_metric_specification"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_data_queries": customizedMetricDataQuerySchema(), }, - "label": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 2047), + }, + }, + "predefined_load_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.customized_load_metric_specification"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predefined_metric_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.PredefinedLoadMetricType](), + }, + "resource_label": { + Type: schema.TypeString, + Optional: true, + }, }, - "metric_stat": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dimensions": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "metric_name": { - Type: schema.TypeString, - Required: true, - }, - "namespace": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "stat": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 100), - }, - "unit": { - Type: schema.TypeString, - Optional: true, - }, - }, + }, + }, + "predefined_metric_pair_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predefined_metric_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.PredefinedMetricPairType](), + }, + "resource_label": { + Type: schema.TypeString, + Optional: true, }, }, - "return_data": { - Type: schema.TypeBool, - Optional: true, - Default: true, + }, + }, + "predefined_scaling_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"predictive_scaling_configuration.0.metric_specification.0.customized_scaling_metric_specification"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predefined_metric_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.PredefinedScalingMetricType](), + }, + "resource_label": { + Type: schema.TypeString, + Optional: true, + }, }, }, }, - }, - "namespace": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, - }, - "statistic": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, - }, - "unit": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, + "target_value": { + Type: schema.TypeFloat, + Required: true, + }, }, }, }, - }, - "disable_scale_in": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "predefined_metric_specification": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - 
ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification"}, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "predefined_metric_type": { - Type: schema.TypeString, - Required: true, - }, - "resource_label": { - Type: schema.TypeString, - Optional: true, - }, - }, + "mode": { + Type: schema.TypeString, + Optional: true, + Default: awstypes.PredictiveScalingModeForecastOnly, + ValidateDiagFunc: enum.Validate[awstypes.PredictiveScalingMode](), + }, + "scheduling_buffer_time": { + Type: nullable.TypeNullableInt, + Optional: true, + ValidateFunc: nullable.ValidateTypeStringNullableIntAtLeast(0), }, - }, - "target_value": { - Type: schema.TypeFloat, - Required: true, }, }, }, - }, - }, - } -} - -// All predictive scaling customized metrics shares same metric data query schema -func customizedMetricDataQuerySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Required: true, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 1023), - }, - "id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 255), + "scaling_adjustment": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"step_adjustment"}, }, - "label": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 2047), + "step_adjustment": { + Type: schema.TypeSet, + Optional: true, + ConflictsWith: []string{"scaling_adjustment"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_interval_lower_bound": { + Type: nullable.TypeNullableFloat, + Optional: true, + ValidateFunc: nullable.ValidateTypeStringNullableFloat, + }, + "metric_interval_upper_bound": { + Type: nullable.TypeNullableFloat, + Optional: true, + ValidateFunc: nullable.ValidateTypeStringNullableFloat, + }, + "scaling_adjustment": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceScalingAdjustmentHash, }, - "metric_stat": { + "target_tracking_configuration": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "metric": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, + "customized_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"target_tracking_configuration.0.predefined_metric_specification"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "dimensions": { - Type: schema.TypeSet, - Optional: true, + "metric_dimension": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -472,51 +366,155 @@ func customizedMetricDataQuerySchema() *schema.Schema { }, }, "metric_name": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, + }, + "metrics": { + Type: schema.TypeSet, + Optional: true, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metric_dimension", "target_tracking_configuration.0.customized_metric_specification.0.metric_name", "target_tracking_configuration.0.customized_metric_specification.0.namespace", 
"target_tracking_configuration.0.customized_metric_specification.0.statistic", "target_tracking_configuration.0.customized_metric_specification.0.unit"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2047), + }, + "id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "label": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2047), + }, + "metric_stat": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dimensions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "metric_name": { + Type: schema.TypeString, + Required: true, + }, + "namespace": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "stat": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + "unit": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "return_data": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + }, + }, }, "namespace": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, + }, + "statistic": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, + }, + "unit": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification.0.metrics"}, + }, + }, + }, + }, + "disable_scale_in": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "predefined_metric_specification": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"target_tracking_configuration.0.customized_metric_specification"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predefined_metric_type": { Type: schema.TypeString, Required: true, }, + "resource_label": { + Type: schema.TypeString, + Optional: true, + }, }, }, }, - "stat": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 100), - }, - "unit": { - Type: schema.TypeString, - Optional: true, + "target_value": { + Type: schema.TypeFloat, + Required: true, }, }, }, }, - "return_data": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, + } }, } } func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) name := d.Get("name").(string) - input, err := getPutScalingPolicyInput(d) + input, err := expandPutScalingPolicyInput(d) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Policy (%s): %s", name, err) } - log.Printf("[DEBUG] Creating Auto Scaling Policy: %s", input) - _, err = conn.PutScalingPolicyWithContext(ctx, input) + _, err = conn.PutScalingPolicy(ctx, input) 
if err != nil { return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Policy (%s): %s", name, err) @@ -529,9 +527,9 @@ func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) - p, err := FindScalingPolicy(ctx, conn, d.Get("autoscaling_group_name").(string), d.Id()) + p, err := findScalingPolicyByTwoPartKey(ctx, conn, d.Get("autoscaling_group_name").(string), d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Auto Scaling Policy %s not found, removing from state", d.Id()) @@ -558,7 +556,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("predictive_scaling_configuration", flattenPredictiveScalingConfig(p.PredictiveScalingConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting predictive_scaling_configuration: %s", err) } - if err := d.Set("step_adjustment", FlattenStepAdjustments(p.StepAdjustments)); err != nil { + if err := d.Set("step_adjustment", flattenStepAdjustments(p.StepAdjustments)); err != nil { return sdkdiag.AppendErrorf(diags, "setting step_adjustment: %s", err) } if err := d.Set("target_tracking_configuration", flattenTargetTrackingConfiguration(p.TargetTrackingConfiguration)); err != nil { @@ -570,16 +568,15 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) - input, err := getPutScalingPolicyInput(d) + input, err := expandPutScalingPolicyInput(d) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Auto Scaling Policy (%s): %s", d.Id(), err) } - log.Printf("[DEBUG] Updating Auto Scaling Policy: %s", input) - _, err = conn.PutScalingPolicyWithContext(ctx, input) + _, err = conn.PutScalingPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Auto Scaling Policy (%s): %s", d.Id(), err) @@ -590,10 +587,10 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta inte func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) log.Printf("[INFO] Deleting Auto Scaling Policy: %s", d.Id()) - _, err := conn.DeletePolicyWithContext(ctx, &autoscaling.DeletePolicyInput{ + _, err := conn.DeletePolicy(ctx, &autoscaling.DeletePolicyInput{ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), PolicyName: aws.String(d.Id()), }) @@ -625,143 +622,139 @@ func resourcePolicyImport(ctx context.Context, d *schema.ResourceData, meta inte return []*schema.ResourceData{d}, nil } -func FindScalingPolicy(ctx context.Context, conn *autoscaling.AutoScaling, asgName, policyName string) (*autoscaling.ScalingPolicy, error) { - input := &autoscaling.DescribePoliciesInput{ - AutoScalingGroupName: aws.String(asgName), - PolicyNames: aws.StringSlice([]string{policyName}), +func findScalingPolicy(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribePoliciesInput) (*awstypes.ScalingPolicy, 
error) { + output, err := findScalingPolicies(ctx, conn, input) + + if err != nil { + return nil, err } - var output []*autoscaling.ScalingPolicy - err := conn.DescribePoliciesPagesWithContext(ctx, input, func(page *autoscaling.DescribePoliciesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + return tfresource.AssertSingleValueResult(output) +} - for _, v := range page.ScalingPolicies { - if v == nil || aws.StringValue(v.PolicyName) != policyName { - continue - } +func findScalingPolicies(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribePoliciesInput) ([]awstypes.ScalingPolicy, error) { + var output []awstypes.ScalingPolicy - output = append(output, v) - } + pages := autoscaling.NewDescribePoliciesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - return !lastPage - }) + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ScalingPolicies...) } - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } + return output, nil +} - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) +func findScalingPolicyByTwoPartKey(ctx context.Context, conn *autoscaling.Client, asgName, policyName string) (*awstypes.ScalingPolicy, error) { + input := &autoscaling.DescribePoliciesInput{ + AutoScalingGroupName: aws.String(asgName), + PolicyNames: []string{policyName}, } - return output[0], nil + return findScalingPolicy(ctx, conn, input) } // PutScalingPolicy can safely resend all parameters without destroying the // resource, so create and update can share this common function. It will error // if certain mutually exclusive values are set. -func getPutScalingPolicyInput(d *schema.ResourceData) (*autoscaling.PutScalingPolicyInput, error) { - var params = &autoscaling.PutScalingPolicyInput{ +func expandPutScalingPolicyInput(d *schema.ResourceData) (*autoscaling.PutScalingPolicyInput, error) { + input := &autoscaling.PutScalingPolicyInput{ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), Enabled: aws.Bool(d.Get("enabled").(bool)), PolicyName: aws.String(d.Get("name").(string)), } // get policy_type first as parameter support depends on policy type - policyType := d.Get("policy_type") - params.PolicyType = aws.String(policyType.(string)) + policyType := policyType(d.Get("policy_type").(string)) + input.PolicyType = aws.String(string(policyType)) // This parameter is supported if the policy type is SimpleScaling or StepScaling. 
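	// Illustrative note, not part of this change: the policyType(...) conversion above uses
	// a locally defined string type whose constants are declared elsewhere in the package;
	// they presumably resemble the following sketch (the values match the Auto Scaling API
	// policy type strings used in the error messages below):
	//
	//	type policyType string
	//
	//	const (
	//		policyTypeSimpleScaling         policyType = "SimpleScaling"
	//		policyTypeStepScaling           policyType = "StepScaling"
	//		policyTypeTargetTrackingScaling policyType = "TargetTrackingScaling"
	//	)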
- if v, ok := d.GetOk("adjustment_type"); ok && (policyType == PolicyTypeSimpleScaling || policyType == PolicyTypeStepScaling) { - params.AdjustmentType = aws.String(v.(string)) + if v, ok := d.GetOk("adjustment_type"); ok && (policyType == policyTypeSimpleScaling || policyType == policyTypeStepScaling) { + input.AdjustmentType = aws.String(v.(string)) } if predictiveScalingConfigFlat := d.Get("predictive_scaling_configuration").([]interface{}); len(predictiveScalingConfigFlat) > 0 { - params.PredictiveScalingConfiguration = expandPredictiveScalingConfig(predictiveScalingConfigFlat) + input.PredictiveScalingConfiguration = expandPredictiveScalingConfig(predictiveScalingConfigFlat) } // This parameter is supported if the policy type is SimpleScaling. if v, ok := d.GetOkExists("cooldown"); ok { // 0 is allowed as placeholder even if policyType is not supported - params.Cooldown = aws.Int64(int64(v.(int))) - if v.(int) != 0 && policyType != PolicyTypeSimpleScaling { - return params, fmt.Errorf("cooldown is only supported for policy type SimpleScaling") + input.Cooldown = aws.Int32(int32(v.(int))) + if v.(int) != 0 && policyType != policyTypeSimpleScaling { + return input, fmt.Errorf("cooldown is only supported for policy type SimpleScaling") } } // This parameter is supported if the policy type is StepScaling or TargetTrackingScaling. if v, ok := d.GetOkExists("estimated_instance_warmup"); ok { // 0 is NOT allowed as placeholder if policyType is not supported - if policyType == PolicyTypeStepScaling || policyType == PolicyTypeTargetTrackingScaling { - params.EstimatedInstanceWarmup = aws.Int64(int64(v.(int))) + if policyType == policyTypeStepScaling || policyType == policyTypeTargetTrackingScaling { + input.EstimatedInstanceWarmup = aws.Int32(int32(v.(int))) } - if v.(int) != 0 && policyType != PolicyTypeStepScaling && policyType != PolicyTypeTargetTrackingScaling { - return params, fmt.Errorf("estimated_instance_warmup is only supported for policy type StepScaling and TargetTrackingScaling") + if v.(int) != 0 && policyType != policyTypeStepScaling && policyType != policyTypeTargetTrackingScaling { + return input, fmt.Errorf("estimated_instance_warmup is only supported for policy type StepScaling and TargetTrackingScaling") } } // This parameter is supported if the policy type is StepScaling. - if v, ok := d.GetOk("metric_aggregation_type"); ok && policyType == PolicyTypeStepScaling { - params.MetricAggregationType = aws.String(v.(string)) + if v, ok := d.GetOk("metric_aggregation_type"); ok && policyType == policyTypeStepScaling { + input.MetricAggregationType = aws.String(v.(string)) } // MinAdjustmentMagnitude is supported if the policy type is SimpleScaling or StepScaling. - if v, ok := d.GetOkExists("min_adjustment_magnitude"); ok && v.(int) != 0 && (policyType == PolicyTypeSimpleScaling || policyType == PolicyTypeStepScaling) { - params.MinAdjustmentMagnitude = aws.Int64(int64(v.(int))) + if v, ok := d.GetOkExists("min_adjustment_magnitude"); ok && v.(int) != 0 && (policyType == policyTypeSimpleScaling || policyType == policyTypeStepScaling) { + input.MinAdjustmentMagnitude = aws.Int32(int32(v.(int))) } // This parameter is required if the policy type is SimpleScaling and not supported otherwise. 
//if policy_type=="SimpleScaling" then scaling_adjustment is required and 0 is allowed if v, ok := d.GetOkExists("scaling_adjustment"); ok { // 0 is NOT allowed as placeholder if policyType is not supported - if policyType == PolicyTypeSimpleScaling { - params.ScalingAdjustment = aws.Int64(int64(v.(int))) + if policyType == policyTypeSimpleScaling { + input.ScalingAdjustment = aws.Int32(int32(v.(int))) } - if v.(int) != 0 && policyType != PolicyTypeSimpleScaling { - return params, fmt.Errorf("scaling_adjustment is only supported for policy type SimpleScaling") + if v.(int) != 0 && policyType != policyTypeSimpleScaling { + return input, fmt.Errorf("scaling_adjustment is only supported for policy type SimpleScaling") } - } else if !ok && policyType == PolicyTypeSimpleScaling { - return params, fmt.Errorf("scaling_adjustment is required for policy type SimpleScaling") + } else if !ok && policyType == policyTypeSimpleScaling { + return input, fmt.Errorf("scaling_adjustment is required for policy type SimpleScaling") } // This parameter is required if the policy type is StepScaling and not supported otherwise. - if v, ok := d.GetOk("step_adjustment"); ok { - steps, err := ExpandStepAdjustments(v.(*schema.Set).List()) - if err != nil { - return params, fmt.Errorf("metric_interval_lower_bound and metric_interval_upper_bound must be strings!") + if v, ok := d.GetOk("step_adjustment"); ok && v.(*schema.Set).Len() > 0 { + steps := expandStepAdjustments(v.(*schema.Set).List()) + if len(steps) != 0 && policyType != policyTypeStepScaling { + return input, fmt.Errorf("step_adjustment is only supported for policy type StepScaling") } - params.StepAdjustments = steps - if len(steps) != 0 && policyType != PolicyTypeStepScaling { - return params, fmt.Errorf("step_adjustment is only supported for policy type StepScaling") - } - } else if !ok && policyType == PolicyTypeStepScaling { - return params, fmt.Errorf("step_adjustment is required for policy type StepScaling") + + input.StepAdjustments = steps + } else if !ok && policyType == policyTypeStepScaling { + return input, fmt.Errorf("step_adjustment is required for policy type StepScaling") } // This parameter is required if the policy type is TargetTrackingScaling and not supported otherwise. 
if v, ok := d.GetOk("target_tracking_configuration"); ok { - params.TargetTrackingConfiguration = expandTargetTrackingConfiguration(v.([]interface{})) - if policyType != PolicyTypeTargetTrackingScaling { - return params, fmt.Errorf("target_tracking_configuration is only supported for policy type TargetTrackingScaling") + input.TargetTrackingConfiguration = expandTargetTrackingConfiguration(v.([]interface{})) + if policyType != policyTypeTargetTrackingScaling { + return input, fmt.Errorf("target_tracking_configuration is only supported for policy type TargetTrackingScaling") } - } else if !ok && policyType == PolicyTypeTargetTrackingScaling { - return params, fmt.Errorf("target_tracking_configuration is required for policy type TargetTrackingScaling") + } else if !ok && policyType == policyTypeTargetTrackingScaling { + return input, fmt.Errorf("target_tracking_configuration is required for policy type TargetTrackingScaling") } - return params, nil + return input, nil } func resourceScalingAdjustmentHash(v interface{}) int { @@ -778,14 +771,14 @@ func resourceScalingAdjustmentHash(v interface{}) int { return create.StringHashcode(buf.String()) } -func expandTargetTrackingConfiguration(configs []interface{}) *autoscaling.TargetTrackingConfiguration { +func expandTargetTrackingConfiguration(configs []interface{}) *awstypes.TargetTrackingConfiguration { if len(configs) < 1 { return nil } config := configs[0].(map[string]interface{}) - result := &autoscaling.TargetTrackingConfiguration{} + result := &awstypes.TargetTrackingConfiguration{} result.TargetValue = aws.Float64(config["target_value"].(float64)) if v, ok := config["disable_scale_in"]; ok { @@ -793,8 +786,8 @@ func expandTargetTrackingConfiguration(configs []interface{}) *autoscaling.Targe } if v, ok := config["predefined_metric_specification"]; ok && len(v.([]interface{})) > 0 { spec := v.([]interface{})[0].(map[string]interface{}) - predSpec := &autoscaling.PredefinedMetricSpecification{ - PredefinedMetricType: aws.String(spec["predefined_metric_type"].(string)), + predSpec := &awstypes.PredefinedMetricSpecification{ + PredefinedMetricType: awstypes.MetricType(spec["predefined_metric_type"].(string)), } if val, ok := spec["resource_label"]; ok && val.(string) != "" { predSpec.ResourceLabel = aws.String(val.(string)) @@ -803,22 +796,22 @@ func expandTargetTrackingConfiguration(configs []interface{}) *autoscaling.Targe } if v, ok := config["customized_metric_specification"]; ok && len(v.([]interface{})) > 0 { spec := v.([]interface{})[0].(map[string]interface{}) - customSpec := &autoscaling.CustomizedMetricSpecification{} + customSpec := &awstypes.CustomizedMetricSpecification{} if val, ok := spec["metrics"].(*schema.Set); ok && val.Len() > 0 { customSpec.Metrics = expandTargetTrackingMetricDataQueries(val.List()) } else { customSpec.Namespace = aws.String(spec["namespace"].(string)) customSpec.MetricName = aws.String(spec["metric_name"].(string)) - customSpec.Statistic = aws.String(spec["statistic"].(string)) + customSpec.Statistic = awstypes.MetricStatistic(spec["statistic"].(string)) if val, ok := spec["unit"]; ok && len(val.(string)) > 0 { customSpec.Unit = aws.String(val.(string)) } if val, ok := spec["metric_dimension"]; ok { dims := val.([]interface{}) - metDimList := make([]*autoscaling.MetricDimension, len(dims)) + metDimList := make([]awstypes.MetricDimension, len(dims)) for i := range metDimList { dim := dims[i].(map[string]interface{}) - md := &autoscaling.MetricDimension{ + md := awstypes.MetricDimension{ Name: 
aws.String(dim["name"].(string)), Value: aws.String(dim["value"].(string)), } @@ -832,30 +825,30 @@ func expandTargetTrackingConfiguration(configs []interface{}) *autoscaling.Targe return result } -func expandTargetTrackingMetricDataQueries(metricDataQuerySlices []interface{}) []*autoscaling.TargetTrackingMetricDataQuery { +func expandTargetTrackingMetricDataQueries(metricDataQuerySlices []interface{}) []awstypes.TargetTrackingMetricDataQuery { if metricDataQuerySlices == nil || len(metricDataQuerySlices) < 1 { return nil } - metricDataQueries := make([]*autoscaling.TargetTrackingMetricDataQuery, len(metricDataQuerySlices)) + metricDataQueries := make([]awstypes.TargetTrackingMetricDataQuery, len(metricDataQuerySlices)) for i := range metricDataQueries { metricDataQueryFlat := metricDataQuerySlices[i].(map[string]interface{}) - metricDataQuery := &autoscaling.TargetTrackingMetricDataQuery{ + metricDataQuery := awstypes.TargetTrackingMetricDataQuery{ Id: aws.String(metricDataQueryFlat["id"].(string)), } if val, ok := metricDataQueryFlat["metric_stat"]; ok && len(val.([]interface{})) > 0 { metricStatSpec := val.([]interface{})[0].(map[string]interface{}) metricSpec := metricStatSpec["metric"].([]interface{})[0].(map[string]interface{}) - metric := &autoscaling.Metric{ + metric := &awstypes.Metric{ MetricName: aws.String(metricSpec["metric_name"].(string)), Namespace: aws.String(metricSpec["namespace"].(string)), } if v, ok := metricSpec["dimensions"]; ok { dims := v.(*schema.Set).List() - dimList := make([]*autoscaling.MetricDimension, len(dims)) + dimList := make([]awstypes.MetricDimension, len(dims)) for i := range dimList { dim := dims[i].(map[string]interface{}) - md := &autoscaling.MetricDimension{ + md := awstypes.MetricDimension{ Name: aws.String(dim["name"].(string)), Value: aws.String(dim["value"].(string)), } @@ -863,7 +856,7 @@ func expandTargetTrackingMetricDataQueries(metricDataQuerySlices []interface{}) } metric.Dimensions = dimList } - metricStat := &autoscaling.TargetTrackingMetricStat{ + metricStat := &awstypes.TargetTrackingMetricStat{ Metric: metric, Stat: aws.String(metricStatSpec["stat"].(string)), } @@ -886,31 +879,31 @@ func expandTargetTrackingMetricDataQueries(metricDataQuerySlices []interface{}) return metricDataQueries } -func expandPredictiveScalingConfig(predictiveScalingConfigSlice []interface{}) *autoscaling.PredictiveScalingConfiguration { +func expandPredictiveScalingConfig(predictiveScalingConfigSlice []interface{}) *awstypes.PredictiveScalingConfiguration { if predictiveScalingConfigSlice == nil || len(predictiveScalingConfigSlice) < 1 { return nil } predictiveScalingConfigFlat := predictiveScalingConfigSlice[0].(map[string]interface{}) - predictiveScalingConfig := &autoscaling.PredictiveScalingConfiguration{ + predictiveScalingConfig := &awstypes.PredictiveScalingConfiguration{ MetricSpecifications: expandPredictiveScalingMetricSpecifications(predictiveScalingConfigFlat["metric_specification"].([]interface{})), - MaxCapacityBreachBehavior: aws.String(predictiveScalingConfigFlat["max_capacity_breach_behavior"].(string)), - Mode: aws.String(predictiveScalingConfigFlat["mode"].(string)), + MaxCapacityBreachBehavior: awstypes.PredictiveScalingMaxCapacityBreachBehavior(predictiveScalingConfigFlat["max_capacity_breach_behavior"].(string)), + Mode: awstypes.PredictiveScalingMode(predictiveScalingConfigFlat["mode"].(string)), } if v, null, _ := nullable.Int(predictiveScalingConfigFlat["max_capacity_buffer"].(string)).Value(); !null { - 
predictiveScalingConfig.MaxCapacityBuffer = aws.Int64(v) + predictiveScalingConfig.MaxCapacityBuffer = aws.Int32(int32(v)) } if v, null, _ := nullable.Int(predictiveScalingConfigFlat["scheduling_buffer_time"].(string)).Value(); !null { - predictiveScalingConfig.SchedulingBufferTime = aws.Int64(v) + predictiveScalingConfig.SchedulingBufferTime = aws.Int32(int32(v)) } return predictiveScalingConfig } -func expandPredictiveScalingMetricSpecifications(metricSpecificationsSlice []interface{}) []*autoscaling.PredictiveScalingMetricSpecification { +func expandPredictiveScalingMetricSpecifications(metricSpecificationsSlice []interface{}) []awstypes.PredictiveScalingMetricSpecification { if metricSpecificationsSlice == nil || len(metricSpecificationsSlice) < 1 { return nil } metricSpecificationsFlat := metricSpecificationsSlice[0].(map[string]interface{}) - metricSpecification := &autoscaling.PredictiveScalingMetricSpecification{ + metricSpecification := awstypes.PredictiveScalingMetricSpecification{ CustomizedCapacityMetricSpecification: expandCustomizedCapacityMetricSpecification(metricSpecificationsFlat["customized_capacity_metric_specification"].([]interface{})), CustomizedLoadMetricSpecification: expandCustomizedLoadMetricSpecification(metricSpecificationsFlat["customized_load_metric_specification"].([]interface{})), CustomizedScalingMetricSpecification: expandCustomizedScalingMetricSpecification(metricSpecificationsFlat["customized_scaling_metric_specification"].([]interface{})), @@ -919,16 +912,16 @@ func expandPredictiveScalingMetricSpecifications(metricSpecificationsSlice []int PredefinedScalingMetricSpecification: expandPredefinedScalingMetricSpecification(metricSpecificationsFlat["predefined_scaling_metric_specification"].([]interface{})), TargetValue: aws.Float64(metricSpecificationsFlat["target_value"].(float64)), } - return []*autoscaling.PredictiveScalingMetricSpecification{metricSpecification} + return []awstypes.PredictiveScalingMetricSpecification{metricSpecification} } -func expandPredefinedLoadMetricSpecification(predefinedLoadMetricSpecificationSlice []interface{}) *autoscaling.PredictiveScalingPredefinedLoadMetric { +func expandPredefinedLoadMetricSpecification(predefinedLoadMetricSpecificationSlice []interface{}) *awstypes.PredictiveScalingPredefinedLoadMetric { if predefinedLoadMetricSpecificationSlice == nil || len(predefinedLoadMetricSpecificationSlice) < 1 { return nil } predefinedLoadMetricSpecificationFlat := predefinedLoadMetricSpecificationSlice[0].(map[string]interface{}) - predefinedLoadMetricSpecification := &autoscaling.PredictiveScalingPredefinedLoadMetric{ - PredefinedMetricType: aws.String(predefinedLoadMetricSpecificationFlat["predefined_metric_type"].(string)), + predefinedLoadMetricSpecification := &awstypes.PredictiveScalingPredefinedLoadMetric{ + PredefinedMetricType: awstypes.PredefinedLoadMetricType(predefinedLoadMetricSpecificationFlat["predefined_metric_type"].(string)), } if label, ok := predefinedLoadMetricSpecificationFlat["resource_label"].(string); ok && label != "" { predefinedLoadMetricSpecification.ResourceLabel = aws.String(label) @@ -936,13 +929,13 @@ func expandPredefinedLoadMetricSpecification(predefinedLoadMetricSpecificationSl return predefinedLoadMetricSpecification } -func expandPredefinedMetricPairSpecification(predefinedMetricPairSpecificationSlice []interface{}) *autoscaling.PredictiveScalingPredefinedMetricPair { +func expandPredefinedMetricPairSpecification(predefinedMetricPairSpecificationSlice []interface{}) 
*awstypes.PredictiveScalingPredefinedMetricPair { if predefinedMetricPairSpecificationSlice == nil || len(predefinedMetricPairSpecificationSlice) < 1 { return nil } predefinedMetricPairSpecificationFlat := predefinedMetricPairSpecificationSlice[0].(map[string]interface{}) - predefinedMetricPairSpecification := &autoscaling.PredictiveScalingPredefinedMetricPair{ - PredefinedMetricType: aws.String(predefinedMetricPairSpecificationFlat["predefined_metric_type"].(string)), + predefinedMetricPairSpecification := &awstypes.PredictiveScalingPredefinedMetricPair{ + PredefinedMetricType: awstypes.PredefinedMetricPairType(predefinedMetricPairSpecificationFlat["predefined_metric_type"].(string)), } if label, ok := predefinedMetricPairSpecificationFlat["resource_label"].(string); ok && label != "" { predefinedMetricPairSpecification.ResourceLabel = aws.String(label) @@ -950,13 +943,13 @@ func expandPredefinedMetricPairSpecification(predefinedMetricPairSpecificationSl return predefinedMetricPairSpecification } -func expandPredefinedScalingMetricSpecification(predefinedScalingMetricSpecificationSlice []interface{}) *autoscaling.PredictiveScalingPredefinedScalingMetric { +func expandPredefinedScalingMetricSpecification(predefinedScalingMetricSpecificationSlice []interface{}) *awstypes.PredictiveScalingPredefinedScalingMetric { if predefinedScalingMetricSpecificationSlice == nil || len(predefinedScalingMetricSpecificationSlice) < 1 { return nil } predefinedScalingMetricSpecificationFlat := predefinedScalingMetricSpecificationSlice[0].(map[string]interface{}) - predefinedScalingMetricSpecification := &autoscaling.PredictiveScalingPredefinedScalingMetric{ - PredefinedMetricType: aws.String(predefinedScalingMetricSpecificationFlat["predefined_metric_type"].(string)), + predefinedScalingMetricSpecification := &awstypes.PredictiveScalingPredefinedScalingMetric{ + PredefinedMetricType: awstypes.PredefinedScalingMetricType(predefinedScalingMetricSpecificationFlat["predefined_metric_type"].(string)), } if label, ok := predefinedScalingMetricSpecificationFlat["resource_label"].(string); ok && label != "" { predefinedScalingMetricSpecification.ResourceLabel = aws.String(label) @@ -964,63 +957,63 @@ func expandPredefinedScalingMetricSpecification(predefinedScalingMetricSpecifica return predefinedScalingMetricSpecification } -func expandCustomizedScalingMetricSpecification(customizedScalingMetricSpecificationSlice []interface{}) *autoscaling.PredictiveScalingCustomizedScalingMetric { +func expandCustomizedScalingMetricSpecification(customizedScalingMetricSpecificationSlice []interface{}) *awstypes.PredictiveScalingCustomizedScalingMetric { if customizedScalingMetricSpecificationSlice == nil || len(customizedScalingMetricSpecificationSlice) < 1 { return nil } customizedScalingMetricSpecificationFlat := customizedScalingMetricSpecificationSlice[0].(map[string]interface{}) - customizedScalingMetricSpecification := &autoscaling.PredictiveScalingCustomizedScalingMetric{ + customizedScalingMetricSpecification := &awstypes.PredictiveScalingCustomizedScalingMetric{ MetricDataQueries: expandMetricDataQueries(customizedScalingMetricSpecificationFlat["metric_data_queries"].([]interface{})), } return customizedScalingMetricSpecification } -func expandCustomizedLoadMetricSpecification(customizedLoadMetricSpecificationSlice []interface{}) *autoscaling.PredictiveScalingCustomizedLoadMetric { +func expandCustomizedLoadMetricSpecification(customizedLoadMetricSpecificationSlice []interface{}) 
*awstypes.PredictiveScalingCustomizedLoadMetric { if customizedLoadMetricSpecificationSlice == nil || len(customizedLoadMetricSpecificationSlice) < 1 { return nil } customizedLoadMetricSpecificationSliceFlat := customizedLoadMetricSpecificationSlice[0].(map[string]interface{}) - customizedLoadMetricSpecification := &autoscaling.PredictiveScalingCustomizedLoadMetric{ + customizedLoadMetricSpecification := &awstypes.PredictiveScalingCustomizedLoadMetric{ MetricDataQueries: expandMetricDataQueries(customizedLoadMetricSpecificationSliceFlat["metric_data_queries"].([]interface{})), } return customizedLoadMetricSpecification } -func expandCustomizedCapacityMetricSpecification(customizedCapacityMetricSlice []interface{}) *autoscaling.PredictiveScalingCustomizedCapacityMetric { +func expandCustomizedCapacityMetricSpecification(customizedCapacityMetricSlice []interface{}) *awstypes.PredictiveScalingCustomizedCapacityMetric { if customizedCapacityMetricSlice == nil || len(customizedCapacityMetricSlice) < 1 { return nil } customizedCapacityMetricSliceFlat := customizedCapacityMetricSlice[0].(map[string]interface{}) - customizedCapacityMetricSpecification := &autoscaling.PredictiveScalingCustomizedCapacityMetric{ + customizedCapacityMetricSpecification := &awstypes.PredictiveScalingCustomizedCapacityMetric{ MetricDataQueries: expandMetricDataQueries(customizedCapacityMetricSliceFlat["metric_data_queries"].([]interface{})), } return customizedCapacityMetricSpecification } -func expandMetricDataQueries(metricDataQuerySlices []interface{}) []*autoscaling.MetricDataQuery { +func expandMetricDataQueries(metricDataQuerySlices []interface{}) []awstypes.MetricDataQuery { if metricDataQuerySlices == nil || len(metricDataQuerySlices) < 1 { return nil } - metricDataQueries := make([]*autoscaling.MetricDataQuery, len(metricDataQuerySlices)) + metricDataQueries := make([]awstypes.MetricDataQuery, len(metricDataQuerySlices)) for i := range metricDataQueries { metricDataQueryFlat := metricDataQuerySlices[i].(map[string]interface{}) - metricDataQuery := &autoscaling.MetricDataQuery{ + metricDataQuery := awstypes.MetricDataQuery{ Id: aws.String(metricDataQueryFlat["id"].(string)), } if val, ok := metricDataQueryFlat["metric_stat"]; ok && len(val.([]interface{})) > 0 { metricStatSpec := val.([]interface{})[0].(map[string]interface{}) metricSpec := metricStatSpec["metric"].([]interface{})[0].(map[string]interface{}) - metric := &autoscaling.Metric{ + metric := &awstypes.Metric{ MetricName: aws.String(metricSpec["metric_name"].(string)), Namespace: aws.String(metricSpec["namespace"].(string)), } if v, ok := metricSpec["dimensions"]; ok { dims := v.(*schema.Set).List() - dimList := make([]*autoscaling.MetricDimension, len(dims)) + dimList := make([]awstypes.MetricDimension, len(dims)) for i := range dimList { dim := dims[i].(map[string]interface{}) - md := &autoscaling.MetricDimension{ + md := awstypes.MetricDimension{ Name: aws.String(dim["name"].(string)), Value: aws.String(dim["value"].(string)), } @@ -1028,7 +1021,7 @@ func expandMetricDataQueries(metricDataQuerySlices []interface{}) []*autoscaling } metric.Dimensions = dimList } - metricStat := &autoscaling.MetricStat{ + metricStat := &awstypes.MetricStat{ Metric: metric, Stat: aws.String(metricStatSpec["stat"].(string)), } @@ -1051,19 +1044,19 @@ func expandMetricDataQueries(metricDataQuerySlices []interface{}) []*autoscaling return metricDataQueries } -func flattenTargetTrackingConfiguration(config *autoscaling.TargetTrackingConfiguration) []interface{} { +func 
flattenTargetTrackingConfiguration(config *awstypes.TargetTrackingConfiguration) []interface{} { if config == nil { return []interface{}{} } result := map[string]interface{}{} - result["disable_scale_in"] = aws.BoolValue(config.DisableScaleIn) - result["target_value"] = aws.Float64Value(config.TargetValue) + result["disable_scale_in"] = aws.ToBool(config.DisableScaleIn) + result["target_value"] = aws.ToFloat64(config.TargetValue) if config.PredefinedMetricSpecification != nil { spec := map[string]interface{}{} - spec["predefined_metric_type"] = aws.StringValue(config.PredefinedMetricSpecification.PredefinedMetricType) + spec["predefined_metric_type"] = string(config.PredefinedMetricSpecification.PredefinedMetricType) if config.PredefinedMetricSpecification.ResourceLabel != nil { - spec["resource_label"] = aws.StringValue(config.PredefinedMetricSpecification.ResourceLabel) + spec["resource_label"] = aws.ToString(config.PredefinedMetricSpecification.ResourceLabel) } result["predefined_metric_specification"] = []map[string]interface{}{spec} } @@ -1072,19 +1065,19 @@ func flattenTargetTrackingConfiguration(config *autoscaling.TargetTrackingConfig if config.CustomizedMetricSpecification.Metrics != nil { spec["metrics"] = flattenTargetTrackingMetricDataQueries(config.CustomizedMetricSpecification.Metrics) } else { - spec["metric_name"] = aws.StringValue(config.CustomizedMetricSpecification.MetricName) - spec["namespace"] = aws.StringValue(config.CustomizedMetricSpecification.Namespace) - spec["statistic"] = aws.StringValue(config.CustomizedMetricSpecification.Statistic) + spec["metric_name"] = aws.ToString(config.CustomizedMetricSpecification.MetricName) + spec["namespace"] = aws.ToString(config.CustomizedMetricSpecification.Namespace) + spec["statistic"] = string(config.CustomizedMetricSpecification.Statistic) if config.CustomizedMetricSpecification.Unit != nil { - spec["unit"] = aws.StringValue(config.CustomizedMetricSpecification.Unit) + spec["unit"] = aws.ToString(config.CustomizedMetricSpecification.Unit) } if config.CustomizedMetricSpecification.Dimensions != nil { dimSpec := make([]interface{}, len(config.CustomizedMetricSpecification.Dimensions)) for i := range dimSpec { dim := map[string]interface{}{} rawDim := config.CustomizedMetricSpecification.Dimensions[i] - dim["name"] = aws.StringValue(rawDim.Name) - dim["value"] = aws.StringValue(rawDim.Value) + dim["name"] = aws.ToString(rawDim.Name) + dim["value"] = aws.ToString(rawDim.Value) dimSpec[i] = dim } spec["metric_dimension"] = dimSpec @@ -1095,17 +1088,17 @@ func flattenTargetTrackingConfiguration(config *autoscaling.TargetTrackingConfig return []interface{}{result} } -func flattenTargetTrackingMetricDataQueries(metricDataQueries []*autoscaling.TargetTrackingMetricDataQuery) []interface{} { +func flattenTargetTrackingMetricDataQueries(metricDataQueries []awstypes.TargetTrackingMetricDataQuery) []interface{} { metricDataQueriesSpec := make([]interface{}, len(metricDataQueries)) for i := range metricDataQueriesSpec { metricDataQuery := map[string]interface{}{} rawMetricDataQuery := metricDataQueries[i] - metricDataQuery["id"] = aws.StringValue(rawMetricDataQuery.Id) + metricDataQuery["id"] = aws.ToString(rawMetricDataQuery.Id) if rawMetricDataQuery.Expression != nil { - metricDataQuery["expression"] = aws.StringValue(rawMetricDataQuery.Expression) + metricDataQuery["expression"] = aws.ToString(rawMetricDataQuery.Expression) } if rawMetricDataQuery.Label != nil { - metricDataQuery["label"] = aws.StringValue(rawMetricDataQuery.Label) + 
metricDataQuery["label"] = aws.ToString(rawMetricDataQuery.Label) } if rawMetricDataQuery.MetricStat != nil { metricStatSpec := map[string]interface{}{} @@ -1117,30 +1110,30 @@ func flattenTargetTrackingMetricDataQueries(metricDataQueries []*autoscaling.Tar for i := range dimSpec { dim := map[string]interface{}{} rawDim := rawMetric.Dimensions[i] - dim["name"] = aws.StringValue(rawDim.Name) - dim["value"] = aws.StringValue(rawDim.Value) + dim["name"] = aws.ToString(rawDim.Name) + dim["value"] = aws.ToString(rawDim.Value) dimSpec[i] = dim } metricSpec["dimensions"] = dimSpec } - metricSpec["metric_name"] = aws.StringValue(rawMetric.MetricName) - metricSpec["namespace"] = aws.StringValue(rawMetric.Namespace) + metricSpec["metric_name"] = aws.ToString(rawMetric.MetricName) + metricSpec["namespace"] = aws.ToString(rawMetric.Namespace) metricStatSpec["metric"] = []map[string]interface{}{metricSpec} - metricStatSpec["stat"] = aws.StringValue(rawMetricStat.Stat) + metricStatSpec["stat"] = aws.ToString(rawMetricStat.Stat) if rawMetricStat.Unit != nil { - metricStatSpec["unit"] = aws.StringValue(rawMetricStat.Unit) + metricStatSpec["unit"] = aws.ToString(rawMetricStat.Unit) } metricDataQuery["metric_stat"] = []map[string]interface{}{metricStatSpec} } if rawMetricDataQuery.ReturnData != nil { - metricDataQuery["return_data"] = aws.BoolValue(rawMetricDataQuery.ReturnData) + metricDataQuery["return_data"] = aws.ToBool(rawMetricDataQuery.ReturnData) } metricDataQueriesSpec[i] = metricDataQuery } return metricDataQueriesSpec } -func flattenPredictiveScalingConfig(predictiveScalingConfig *autoscaling.PredictiveScalingConfiguration) []map[string]interface{} { +func flattenPredictiveScalingConfig(predictiveScalingConfig *awstypes.PredictiveScalingConfiguration) []map[string]interface{} { predictiveScalingConfigFlat := map[string]interface{}{} if predictiveScalingConfig == nil { return nil @@ -1148,28 +1141,24 @@ func flattenPredictiveScalingConfig(predictiveScalingConfig *autoscaling.Predict if predictiveScalingConfig.MetricSpecifications != nil && len(predictiveScalingConfig.MetricSpecifications) > 0 { predictiveScalingConfigFlat["metric_specification"] = flattenPredictiveScalingMetricSpecifications(predictiveScalingConfig.MetricSpecifications) } - if predictiveScalingConfig.Mode != nil { - predictiveScalingConfigFlat["mode"] = aws.StringValue(predictiveScalingConfig.Mode) - } + predictiveScalingConfigFlat["mode"] = string(predictiveScalingConfig.Mode) if predictiveScalingConfig.SchedulingBufferTime != nil { - predictiveScalingConfigFlat["scheduling_buffer_time"] = strconv.FormatInt(aws.Int64Value(predictiveScalingConfig.SchedulingBufferTime), 10) - } - if predictiveScalingConfig.MaxCapacityBreachBehavior != nil { - predictiveScalingConfigFlat["max_capacity_breach_behavior"] = aws.StringValue(predictiveScalingConfig.MaxCapacityBreachBehavior) + predictiveScalingConfigFlat["scheduling_buffer_time"] = strconv.FormatInt(int64(aws.ToInt32(predictiveScalingConfig.SchedulingBufferTime)), 10) } + predictiveScalingConfigFlat["max_capacity_breach_behavior"] = string(predictiveScalingConfig.MaxCapacityBreachBehavior) if predictiveScalingConfig.MaxCapacityBuffer != nil { - predictiveScalingConfigFlat["max_capacity_buffer"] = strconv.FormatInt(aws.Int64Value(predictiveScalingConfig.MaxCapacityBuffer), 10) + predictiveScalingConfigFlat["max_capacity_buffer"] = strconv.FormatInt(int64(aws.ToInt32(predictiveScalingConfig.MaxCapacityBuffer)), 10) } return []map[string]interface{}{predictiveScalingConfigFlat} } -func 
flattenPredictiveScalingMetricSpecifications(metricSpecification []*autoscaling.PredictiveScalingMetricSpecification) []map[string]interface{} { +func flattenPredictiveScalingMetricSpecifications(metricSpecification []awstypes.PredictiveScalingMetricSpecification) []map[string]interface{} { metricSpecificationFlat := map[string]interface{}{} if metricSpecification == nil || len(metricSpecification) < 1 { return []map[string]interface{}{metricSpecificationFlat} } if metricSpecification[0].TargetValue != nil { - metricSpecificationFlat["target_value"] = aws.Float64Value(metricSpecification[0].TargetValue) + metricSpecificationFlat["target_value"] = aws.ToFloat64(metricSpecification[0].TargetValue) } if metricSpecification[0].CustomizedCapacityMetricSpecification != nil { metricSpecificationFlat["customized_capacity_metric_specification"] = flattenCustomizedCapacityMetricSpecification(metricSpecification[0].CustomizedCapacityMetricSpecification) @@ -1192,37 +1181,37 @@ func flattenPredictiveScalingMetricSpecifications(metricSpecification []*autosca return []map[string]interface{}{metricSpecificationFlat} } -func flattenPredefinedScalingMetricSpecification(predefinedScalingMetricSpecification *autoscaling.PredictiveScalingPredefinedScalingMetric) []map[string]interface{} { +func flattenPredefinedScalingMetricSpecification(predefinedScalingMetricSpecification *awstypes.PredictiveScalingPredefinedScalingMetric) []map[string]interface{} { predefinedScalingMetricSpecificationFlat := map[string]interface{}{} if predefinedScalingMetricSpecification == nil { return []map[string]interface{}{predefinedScalingMetricSpecificationFlat} } - predefinedScalingMetricSpecificationFlat["predefined_metric_type"] = aws.StringValue(predefinedScalingMetricSpecification.PredefinedMetricType) - predefinedScalingMetricSpecificationFlat["resource_label"] = aws.StringValue(predefinedScalingMetricSpecification.ResourceLabel) + predefinedScalingMetricSpecificationFlat["predefined_metric_type"] = string(predefinedScalingMetricSpecification.PredefinedMetricType) + predefinedScalingMetricSpecificationFlat["resource_label"] = aws.ToString(predefinedScalingMetricSpecification.ResourceLabel) return []map[string]interface{}{predefinedScalingMetricSpecificationFlat} } -func flattenPredefinedLoadMetricSpecification(predefinedLoadMetricSpecification *autoscaling.PredictiveScalingPredefinedLoadMetric) []map[string]interface{} { +func flattenPredefinedLoadMetricSpecification(predefinedLoadMetricSpecification *awstypes.PredictiveScalingPredefinedLoadMetric) []map[string]interface{} { predefinedLoadMetricSpecificationFlat := map[string]interface{}{} if predefinedLoadMetricSpecification == nil { return []map[string]interface{}{predefinedLoadMetricSpecificationFlat} } - predefinedLoadMetricSpecificationFlat["predefined_metric_type"] = aws.StringValue(predefinedLoadMetricSpecification.PredefinedMetricType) - predefinedLoadMetricSpecificationFlat["resource_label"] = aws.StringValue(predefinedLoadMetricSpecification.ResourceLabel) + predefinedLoadMetricSpecificationFlat["predefined_metric_type"] = string(predefinedLoadMetricSpecification.PredefinedMetricType) + predefinedLoadMetricSpecificationFlat["resource_label"] = aws.ToString(predefinedLoadMetricSpecification.ResourceLabel) return []map[string]interface{}{predefinedLoadMetricSpecificationFlat} } -func flattenPredefinedMetricPairSpecification(predefinedMetricPairSpecification *autoscaling.PredictiveScalingPredefinedMetricPair) []map[string]interface{} { +func 
flattenPredefinedMetricPairSpecification(predefinedMetricPairSpecification *awstypes.PredictiveScalingPredefinedMetricPair) []map[string]interface{} { predefinedMetricPairSpecificationFlat := map[string]interface{}{} if predefinedMetricPairSpecification == nil { return []map[string]interface{}{predefinedMetricPairSpecificationFlat} } - predefinedMetricPairSpecificationFlat["predefined_metric_type"] = aws.StringValue(predefinedMetricPairSpecification.PredefinedMetricType) - predefinedMetricPairSpecificationFlat["resource_label"] = aws.StringValue(predefinedMetricPairSpecification.ResourceLabel) + predefinedMetricPairSpecificationFlat["predefined_metric_type"] = string(predefinedMetricPairSpecification.PredefinedMetricType) + predefinedMetricPairSpecificationFlat["resource_label"] = aws.ToString(predefinedMetricPairSpecification.ResourceLabel) return []map[string]interface{}{predefinedMetricPairSpecificationFlat} } -func flattenCustomizedScalingMetricSpecification(customizedScalingMetricSpecification *autoscaling.PredictiveScalingCustomizedScalingMetric) []map[string]interface{} { +func flattenCustomizedScalingMetricSpecification(customizedScalingMetricSpecification *awstypes.PredictiveScalingCustomizedScalingMetric) []map[string]interface{} { customizedScalingMetricSpecificationFlat := map[string]interface{}{} if customizedScalingMetricSpecification == nil { return []map[string]interface{}{customizedScalingMetricSpecificationFlat} @@ -1231,7 +1220,7 @@ func flattenCustomizedScalingMetricSpecification(customizedScalingMetricSpecific return []map[string]interface{}{customizedScalingMetricSpecificationFlat} } -func flattenCustomizedLoadMetricSpecification(customizedLoadMetricSpecification *autoscaling.PredictiveScalingCustomizedLoadMetric) []map[string]interface{} { +func flattenCustomizedLoadMetricSpecification(customizedLoadMetricSpecification *awstypes.PredictiveScalingCustomizedLoadMetric) []map[string]interface{} { customizedLoadMetricSpecificationFlat := map[string]interface{}{} if customizedLoadMetricSpecification == nil { return []map[string]interface{}{customizedLoadMetricSpecificationFlat} @@ -1240,7 +1229,7 @@ func flattenCustomizedLoadMetricSpecification(customizedLoadMetricSpecification return []map[string]interface{}{customizedLoadMetricSpecificationFlat} } -func flattenCustomizedCapacityMetricSpecification(customizedCapacityMetricSpecification *autoscaling.PredictiveScalingCustomizedCapacityMetric) []map[string]interface{} { +func flattenCustomizedCapacityMetricSpecification(customizedCapacityMetricSpecification *awstypes.PredictiveScalingCustomizedCapacityMetric) []map[string]interface{} { customizedCapacityMetricSpecificationFlat := map[string]interface{}{} if customizedCapacityMetricSpecification == nil { return []map[string]interface{}{customizedCapacityMetricSpecificationFlat} @@ -1250,17 +1239,17 @@ func flattenCustomizedCapacityMetricSpecification(customizedCapacityMetricSpecif return []map[string]interface{}{customizedCapacityMetricSpecificationFlat} } -func flattenMetricDataQueries(metricDataQueries []*autoscaling.MetricDataQuery) []interface{} { +func flattenMetricDataQueries(metricDataQueries []awstypes.MetricDataQuery) []interface{} { metricDataQueriesSpec := make([]interface{}, len(metricDataQueries)) for i := range metricDataQueriesSpec { metricDataQuery := map[string]interface{}{} rawMetricDataQuery := metricDataQueries[i] - metricDataQuery["id"] = aws.StringValue(rawMetricDataQuery.Id) + metricDataQuery["id"] = aws.ToString(rawMetricDataQuery.Id) if 
rawMetricDataQuery.Expression != nil { - metricDataQuery["expression"] = aws.StringValue(rawMetricDataQuery.Expression) + metricDataQuery["expression"] = aws.ToString(rawMetricDataQuery.Expression) } if rawMetricDataQuery.Label != nil { - metricDataQuery["label"] = aws.StringValue(rawMetricDataQuery.Label) + metricDataQuery["label"] = aws.ToString(rawMetricDataQuery.Label) } if rawMetricDataQuery.MetricStat != nil { metricStatSpec := map[string]interface{}{} @@ -1272,25 +1261,86 @@ func flattenMetricDataQueries(metricDataQueries []*autoscaling.MetricDataQuery) for i := range dimSpec { dim := map[string]interface{}{} rawDim := rawMetric.Dimensions[i] - dim["name"] = aws.StringValue(rawDim.Name) - dim["value"] = aws.StringValue(rawDim.Value) + dim["name"] = aws.ToString(rawDim.Name) + dim["value"] = aws.ToString(rawDim.Value) dimSpec[i] = dim } metricSpec["dimensions"] = dimSpec } - metricSpec["metric_name"] = aws.StringValue(rawMetric.MetricName) - metricSpec["namespace"] = aws.StringValue(rawMetric.Namespace) + metricSpec["metric_name"] = aws.ToString(rawMetric.MetricName) + metricSpec["namespace"] = aws.ToString(rawMetric.Namespace) metricStatSpec["metric"] = []map[string]interface{}{metricSpec} - metricStatSpec["stat"] = aws.StringValue(rawMetricStat.Stat) + metricStatSpec["stat"] = aws.ToString(rawMetricStat.Stat) if rawMetricStat.Unit != nil { - metricStatSpec["unit"] = aws.StringValue(rawMetricStat.Unit) + metricStatSpec["unit"] = aws.ToString(rawMetricStat.Unit) } metricDataQuery["metric_stat"] = []map[string]interface{}{metricStatSpec} } if rawMetricDataQuery.ReturnData != nil { - metricDataQuery["return_data"] = aws.BoolValue(rawMetricDataQuery.ReturnData) + metricDataQuery["return_data"] = aws.ToBool(rawMetricDataQuery.ReturnData) } metricDataQueriesSpec[i] = metricDataQuery } return metricDataQueriesSpec } + +func expandStepAdjustments(tfList []interface{}) []awstypes.StepAdjustment { + if len(tfList) == 0 { + return nil + } + + var apiObjects []awstypes.StepAdjustment + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + apiObject := awstypes.StepAdjustment{ + ScalingAdjustment: aws.Int32(int32(tfMap["scaling_adjustment"].(int))), + } + + if v, ok := tfMap["metric_interval_lower_bound"].(string); ok { + if v, null, _ := nullable.Float(v).Value(); !null { + apiObject.MetricIntervalLowerBound = aws.Float64(v) + } + } + + if v, ok := tfMap["metric_interval_upper_bound"].(string); ok { + if v, null, _ := nullable.Float(v).Value(); !null { + apiObject.MetricIntervalUpperBound = aws.Float64(v) + } + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects +} + +func flattenStepAdjustments(apiObjects []awstypes.StepAdjustment) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "scaling_adjustment": aws.ToInt32(apiObject.ScalingAdjustment), + } + + if v := apiObject.MetricIntervalUpperBound; v != nil { + tfMap["metric_interval_upper_bound"] = flex.Float64ToStringValue(v) + } + + if v := apiObject.MetricIntervalLowerBound; v != nil { + tfMap["metric_interval_lower_bound"] = flex.Float64ToStringValue(v) + } + + tfList = append(tfList, tfMap) + } + + return tfList +} diff --git a/internal/service/autoscaling/policy_test.go b/internal/service/autoscaling/policy_test.go index f5cefac3e14..e2cd0a91bb7 100644 --- a/internal/service/autoscaling/policy_test.go +++ 
b/internal/service/autoscaling/policy_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccAutoScalingPolicy_basic(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceSimpleName := "aws_autoscaling_policy.test_simple" resourceStepName := "aws_autoscaling_policy.test_step" @@ -119,7 +119,7 @@ func TestAccAutoScalingPolicy_basic(t *testing.T) { func TestAccAutoScalingPolicy_disappears(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_policy.test_simple" @@ -143,7 +143,7 @@ func TestAccAutoScalingPolicy_disappears(t *testing.T) { func TestAccAutoScalingPolicy_predictiveScalingPredefined(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceSimpleName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -180,7 +180,7 @@ func TestAccAutoScalingPolicy_predictiveScalingPredefined(t *testing.T) { func TestAccAutoScalingPolicy_predictiveScalingResourceLabel(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceSimpleName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -217,7 +217,7 @@ func TestAccAutoScalingPolicy_predictiveScalingResourceLabel(t *testing.T) { func TestAccAutoScalingPolicy_predictiveScalingCustom(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -270,7 +270,7 @@ func TestAccAutoScalingPolicy_predictiveScalingCustom(t *testing.T) { func TestAccAutoScalingPolicy_predictiveScalingRemoved(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -306,7 +306,7 @@ func TestAccAutoScalingPolicy_predictiveScalingRemoved(t *testing.T) { func TestAccAutoScalingPolicy_predictiveScalingUpdated(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -358,7 +358,7 @@ func TestAccAutoScalingPolicy_predictiveScalingUpdated(t *testing.T) { func TestAccAutoScalingPolicy_predictiveScalingFloatTargetValue(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceSimpleName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -389,7 +389,7 @@ func TestAccAutoScalingPolicy_predictiveScalingFloatTargetValue(t *testing.T) { func TestAccAutoScalingPolicy_simpleScalingStepAdjustment(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := 
"aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -419,7 +419,7 @@ func TestAccAutoScalingPolicy_simpleScalingStepAdjustment(t *testing.T) { func TestAccAutoScalingPolicy_TargetTrack_predefined(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -447,7 +447,7 @@ func TestAccAutoScalingPolicy_TargetTrack_predefined(t *testing.T) { func TestAccAutoScalingPolicy_TargetTrack_custom(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -475,7 +475,7 @@ func TestAccAutoScalingPolicy_TargetTrack_custom(t *testing.T) { func TestAccAutoScalingPolicy_TargetTrack_metricMath(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScalingPolicy + var v awstypes.ScalingPolicy resourceName := "aws_autoscaling_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -503,7 +503,7 @@ func TestAccAutoScalingPolicy_TargetTrack_metricMath(t *testing.T) { func TestAccAutoScalingPolicy_zeroValue(t *testing.T) { ctx := acctest.Context(t) - var v1, v2 autoscaling.ScalingPolicy + var v1, v2 awstypes.ScalingPolicy rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceSimpleName := "aws_autoscaling_policy.test_simple" resourceStepName := "aws_autoscaling_policy.test_step" @@ -541,20 +541,16 @@ func TestAccAutoScalingPolicy_zeroValue(t *testing.T) { }) } -func testAccCheckScalingPolicyExists(ctx context.Context, n string, v *autoscaling.ScalingPolicy) resource.TestCheckFunc { +func testAccCheckScalingPolicyExists(ctx context.Context, n string, v *awstypes.ScalingPolicy) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Auto Scaling Policy ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) - output, err := tfautoscaling.FindScalingPolicy(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) + output, err := tfautoscaling.FindScalingPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) if err != nil { return err @@ -568,14 +564,14 @@ func testAccCheckScalingPolicyExists(ctx context.Context, n string, v *autoscali func testAccCheckPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_policy" { continue } - _, err := tfautoscaling.FindScalingPolicy(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) + _, err := tfautoscaling.FindScalingPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) if tfresource.NotFound(err) { continue diff --git a/internal/service/autoscaling/schedule.go b/internal/service/autoscaling/schedule.go index cd81f54910f..d0fff15c84c 100644 --- a/internal/service/autoscaling/schedule.go +++ b/internal/service/autoscaling/schedule.go @@ -10,9 
+10,10 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -23,8 +24,8 @@ import ( const ScheduleTimeLayout = "2006-01-02T15:04:05Z" -// @SDKResource("aws_autoscaling_schedule") -func ResourceSchedule() *schema.Resource { +// @SDKResource("aws_autoscaling_schedule", name="Scheduled Action") +func resourceSchedule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSchedulePut, ReadWithoutTimeout: resourceScheduleRead, @@ -93,7 +94,7 @@ func ResourceSchedule() *schema.Resource { func resourceSchedulePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) name := d.Get("scheduled_action_name").(string) input := &autoscaling.PutScheduledUpdateGroupActionInput{ @@ -127,36 +128,37 @@ func resourceSchedulePut(ctx context.Context, d *schema.ResourceData, meta inter // autoscaling rules. Since Terraform doesn't have a great pattern for // differentiating between 0 and unset fields, we accept "-1" to mean "don't // include this parameter in the action". - minSize := int64(d.Get("min_size").(int)) - maxSize := int64(d.Get("max_size").(int)) - desiredCapacity := int64(d.Get("desired_capacity").(int)) + minSize := int32(d.Get("min_size").(int)) + maxSize := int32(d.Get("max_size").(int)) + desiredCapacity := int32(d.Get("desired_capacity").(int)) if minSize != -1 { - input.MinSize = aws.Int64(minSize) + input.MinSize = aws.Int32(minSize) } if maxSize != -1 { - input.MaxSize = aws.Int64(maxSize) + input.MaxSize = aws.Int32(maxSize) } if desiredCapacity != -1 { - input.DesiredCapacity = aws.Int64(desiredCapacity) + input.DesiredCapacity = aws.Int32(desiredCapacity) } - log.Printf("[INFO] Putting Auto Scaling Scheduled Action: %s", input) - _, err := conn.PutScheduledUpdateGroupActionWithContext(ctx, input) + _, err := conn.PutScheduledUpdateGroupAction(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Scheduled Action (%s): %s", name, err) + return sdkdiag.AppendErrorf(diags, "putting Auto Scaling Scheduled Action (%s): %s", name, err) } - d.SetId(name) + if d.IsNewResource() { + d.SetId(name) + } return append(diags, resourceScheduleRead(ctx, d, meta)...) 
} func resourceScheduleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) - sa, err := FindScheduledUpdateGroupAction(ctx, conn, d.Get("autoscaling_group_name").(string), d.Id()) + sa, err := findScheduleByTwoPartKey(ctx, conn, d.Get("autoscaling_group_name").(string), d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Auto Scaling Scheduled Action %s not found, removing from state", d.Id()) @@ -199,10 +201,10 @@ func resourceScheduleRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceScheduleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) log.Printf("[INFO] Deleting Auto Scaling Scheduled Action: %s", d.Id()) - _, err := conn.DeleteScheduledActionWithContext(ctx, &autoscaling.DeleteScheduledActionInput{ + _, err := conn.DeleteScheduledAction(ctx, &autoscaling.DeleteScheduledActionInput{ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)), ScheduledActionName: aws.String(d.Id()), }) @@ -239,49 +241,47 @@ func resourceScheduleImport(ctx context.Context, d *schema.ResourceData, meta in return []*schema.ResourceData{d}, nil } -func FindScheduledUpdateGroupAction(ctx context.Context, conn *autoscaling.AutoScaling, asgName, actionName string) (*autoscaling.ScheduledUpdateGroupAction, error) { - input := &autoscaling.DescribeScheduledActionsInput{ - AutoScalingGroupName: aws.String(asgName), - ScheduledActionNames: aws.StringSlice([]string{actionName}), +func findSchedule(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeScheduledActionsInput) (*awstypes.ScheduledUpdateGroupAction, error) { + output, err := findSchedules(ctx, conn, input) + + if err != nil { + return nil, err } - var output []*autoscaling.ScheduledUpdateGroupAction - err := conn.DescribeScheduledActionsPagesWithContext(ctx, input, func(page *autoscaling.DescribeScheduledActionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + return tfresource.AssertSingleValueResult(output) +} - for _, v := range page.ScheduledUpdateGroupActions { - if v == nil || aws.StringValue(v.ScheduledActionName) != actionName { - continue - } +func findSchedules(ctx context.Context, conn *autoscaling.Client, input *autoscaling.DescribeScheduledActionsInput) ([]awstypes.ScheduledUpdateGroupAction, error) { + var output []awstypes.ScheduledUpdateGroupAction - output = append(output, v) - } + pages := autoscaling.NewDescribeScheduledActionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - return !lastPage - }) + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ScheduledUpdateGroupActions...) 
} - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } + return output, nil +} - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) +func findScheduleByTwoPartKey(ctx context.Context, conn *autoscaling.Client, asgName, actionName string) (*awstypes.ScheduledUpdateGroupAction, error) { + input := &autoscaling.DescribeScheduledActionsInput{ + AutoScalingGroupName: aws.String(asgName), + ScheduledActionNames: []string{actionName}, } - return output[0], nil + return findSchedule(ctx, conn, input) } func validScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) { diff --git a/internal/service/autoscaling/schedule_test.go b/internal/service/autoscaling/schedule_test.go index b9ec9c7b6d5..f8b0cd09f66 100644 --- a/internal/service/autoscaling/schedule_test.go +++ b/internal/service/autoscaling/schedule_test.go @@ -9,8 +9,8 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,7 +23,7 @@ import ( func TestAccAutoScalingSchedule_basic(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScheduledUpdateGroupAction + var v awstypes.ScheduledUpdateGroupAction rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) startTime := testAccScheduleValidStart(t) @@ -54,7 +54,7 @@ func TestAccAutoScalingSchedule_basic(t *testing.T) { func TestAccAutoScalingSchedule_disappears(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScheduledUpdateGroupAction + var v awstypes.ScheduledUpdateGroupAction rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) startTime := testAccScheduleValidStart(t) @@ -81,7 +81,7 @@ func TestAccAutoScalingSchedule_disappears(t *testing.T) { func TestAccAutoScalingSchedule_recurrence(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScheduledUpdateGroupAction + var v awstypes.ScheduledUpdateGroupAction rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_autoscaling_schedule.test" @@ -110,7 +110,7 @@ func TestAccAutoScalingSchedule_recurrence(t *testing.T) { func TestAccAutoScalingSchedule_zeroValues(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScheduledUpdateGroupAction + var v awstypes.ScheduledUpdateGroupAction rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) startTime := testAccScheduleValidStart(t) endTime := testAccScheduleValidEnd(t) @@ -140,7 +140,7 @@ func TestAccAutoScalingSchedule_zeroValues(t *testing.T) { func TestAccAutoScalingSchedule_negativeOne(t *testing.T) { ctx := acctest.Context(t) - var v autoscaling.ScheduledUpdateGroupAction + var v awstypes.ScheduledUpdateGroupAction rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) startTime := testAccScheduleValidStart(t) endTime := testAccScheduleValidEnd(t) @@ -171,11 +171,11 @@ func TestAccAutoScalingSchedule_negativeOne(t *testing.T) { } func testAccScheduleValidEnd(t *testing.T) string { - return testAccScheduleTime(t, "2h") + return testAccScheduleTime(t, "12h") } func testAccScheduleValidStart(t *testing.T) 
string { - return testAccScheduleTime(t, "1h") + return testAccScheduleTime(t, "6h") } func testAccScheduleTime(t *testing.T, duration string) string { @@ -187,20 +187,16 @@ func testAccScheduleTime(t *testing.T, duration string) string { return n.Add(d).Format(tfautoscaling.ScheduleTimeLayout) } -func testAccCheckScalingScheduleExists(ctx context.Context, n string, v *autoscaling.ScheduledUpdateGroupAction) resource.TestCheckFunc { +func testAccCheckScalingScheduleExists(ctx context.Context, n string, v *awstypes.ScheduledUpdateGroupAction) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Auto Scaling Scheduled Action ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) - output, err := tfautoscaling.FindScheduledUpdateGroupAction(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) + output, err := tfautoscaling.FindScheduleByTwoPartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) if err != nil { return err @@ -214,14 +210,14 @@ func testAccCheckScalingScheduleExists(ctx context.Context, n string, v *autosca func testAccCheckScheduleDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_schedule" { continue } - _, err := tfautoscaling.FindScheduledUpdateGroupAction(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) + _, err := tfautoscaling.FindScheduleByTwoPartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -238,13 +234,13 @@ func testAccCheckScheduleDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckScalingScheduleHasNoDesiredCapacity(v *autoscaling.ScheduledUpdateGroupAction) resource.TestCheckFunc { +func testAccCheckScalingScheduleHasNoDesiredCapacity(v *awstypes.ScheduledUpdateGroupAction) resource.TestCheckFunc { return func(s *terraform.State) error { if v.DesiredCapacity == nil { return nil } - return fmt.Errorf("Expected not to set desired capacity, got %v", aws.Int64Value(v.DesiredCapacity)) + return fmt.Errorf("Expected not to set desired capacity, got %v", aws.ToInt32(v.DesiredCapacity)) } } @@ -319,8 +315,8 @@ resource "aws_autoscaling_schedule" "test" { max_size = 3 min_size = 1 desired_capacity = -1 - start_time = "%s" - end_time = "%s" + start_time = %[2]q + end_time = %[3]q autoscaling_group_name = aws_autoscaling_group.test.name } `, rName, startTime, endTime)) diff --git a/internal/service/autoscaling/service_endpoints_gen_test.go b/internal/service/autoscaling/service_endpoints_gen_test.go index dc67c874dfb..4fbf987c813 100644 --- a/internal/service/autoscaling/service_endpoints_gen_test.go +++ b/internal/service/autoscaling/service_endpoints_gen_test.go @@ -4,17 +4,17 @@ package autoscaling_test import ( "context" + "errors" "fmt" "maps" - "net/url" "os" "path/filepath" "reflect" "strings" "testing" - "github.com/aws/aws-sdk-go/aws/endpoints" - autoscaling_sdkv1 "github.com/aws/aws-sdk-go/service/autoscaling" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + 
autoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscaling" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" @@ -212,32 +212,42 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := autoscaling_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(autoscaling_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), autoscaling_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI.String() } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { t.Helper() - client := meta.AutoScalingConn(ctx) - - req, _ := client.DescribeAutoScalingGroupsRequest(&autoscaling_sdkv1.DescribeAutoScalingGroupsInput{}) + var endpoint string - req.HTTPRequest.URL.Path = "/" + client := meta.AutoScalingClient(ctx) - endpoint := req.HTTPRequest.URL.String() + _, err := client.DescribeAutoScalingGroups(ctx, &autoscaling_sdkv2.DescribeAutoScalingGroupsInput{}, + func(opts *autoscaling_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &endpoint), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } return endpoint } diff --git a/internal/service/autoscaling/service_package_gen.go b/internal/service/autoscaling/service_package_gen.go index 75dcf6be0c3..6601af1369c 100644 --- a/internal/service/autoscaling/service_package_gen.go +++ b/internal/service/autoscaling/service_package_gen.go @@ -5,9 +5,8 @@ package autoscaling import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - autoscaling_sdkv1 "github.com/aws/aws-sdk-go/service/autoscaling" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + autoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscaling" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -26,16 +25,19 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceGroup, + Factory: dataSourceGroup, TypeName: "aws_autoscaling_group", + Name: "Group", }, { - Factory: DataSourceGroups, + Factory: dataSourceGroups, TypeName: "aws_autoscaling_groups", + Name: "Groups", }, { - Factory: DataSourceLaunchConfiguration, + Factory: dataSourceLaunchConfiguration, TypeName: "aws_launch_configuration", + Name: "Launch Configuration", }, } } @@ -43,39 +45,47 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceAttachment, + Factory: resourceAttachment, TypeName: "aws_autoscaling_attachment", + Name: "Attachment", }, { - Factory: ResourceGroup, + Factory: resourceGroup, TypeName: "aws_autoscaling_group", + 
Name: "Group", }, { - Factory: ResourceGroupTag, + Factory: resourceGroupTag, TypeName: "aws_autoscaling_group_tag", + Name: "Group Tag", }, { - Factory: ResourceLifecycleHook, + Factory: resourceLifecycleHook, TypeName: "aws_autoscaling_lifecycle_hook", + Name: "Lifecycle Hook", }, { - Factory: ResourceNotification, + Factory: resourceNotification, TypeName: "aws_autoscaling_notification", + Name: "Notification", }, { - Factory: ResourcePolicy, + Factory: resourcePolicy, TypeName: "aws_autoscaling_policy", + Name: "Policy", }, { - Factory: ResourceSchedule, + Factory: resourceSchedule, TypeName: "aws_autoscaling_schedule", + Name: "Scheduled Action", }, { - Factory: ResourceTrafficSourceAttachment, + Factory: resourceTrafficSourceAttachment, TypeName: "aws_autoscaling_traffic_source_attachment", + Name: "Traffic Source Attachment", }, { - Factory: ResourceLaunchConfiguration, + Factory: resourceLaunchConfiguration, TypeName: "aws_launch_configuration", Name: "Launch Configuration", }, @@ -86,11 +96,15 @@ func (p *servicePackage) ServicePackageName() string { return names.AutoScaling } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*autoscaling_sdkv1.AutoScaling, error) { - sess := config["session"].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*autoscaling_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return autoscaling_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil + return autoscaling_sdkv2.NewFromConfig(cfg, func(o *autoscaling_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/autoscaling/sweep.go b/internal/service/autoscaling/sweep.go index dca31163c9b..e84d852477a 100644 --- a/internal/service/autoscaling/sweep.go +++ b/internal/service/autoscaling/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -33,34 +33,32 @@ func sweepGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.AutoScalingConn(ctx) + conn := client.AutoScalingClient(ctx) input := &autoscaling.DescribeAutoScalingGroupsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeAutoScalingGroupsPagesWithContext(ctx, input, func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := autoscaling.NewDescribeAutoScalingGroupsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Auto Scaling Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error 
listing Auto Scaling Groups (%s): %w", region, err) } for _, v := range page.AutoScalingGroups { - r := ResourceGroup() + r := resourceGroup() d := r.Data(nil) - d.SetId(aws.StringValue(v.AutoScalingGroupName)) + d.SetId(aws.ToString(v.AutoScalingGroupName)) d.Set("force_delete", true) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Auto Scaling Group sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing Auto Scaling Groups (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -78,33 +76,31 @@ func sweepLaunchConfigurations(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.AutoScalingConn(ctx) + conn := client.AutoScalingClient(ctx) input := &autoscaling.DescribeLaunchConfigurationsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeLaunchConfigurationsPagesWithContext(ctx, input, func(page *autoscaling.DescribeLaunchConfigurationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := autoscaling.NewDescribeLaunchConfigurationsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Auto Scaling Launch Configuration sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Auto Scaling Launch Configurations (%s): %w", region, err) } for _, v := range page.LaunchConfigurations { - r := ResourceLaunchConfiguration() + r := resourceLaunchConfiguration() d := r.Data(nil) - d.SetId(aws.StringValue(v.LaunchConfigurationName)) + d.SetId(aws.ToString(v.LaunchConfigurationName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Auto Scaling Launch Configuration sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing Auto Scaling Launch Configurations (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/autoscaling/tags_gen.go b/internal/service/autoscaling/tags_gen.go index 9c8753a8fb8..0732e256972 100644 --- a/internal/service/autoscaling/tags_gen.go +++ b/internal/service/autoscaling/tags_gen.go @@ -5,39 +5,40 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) -// GetTag fetches an individual autoscaling service tag for a resource. +// findTag fetches an individual autoscaling service tag for a resource. 
// Returns whether the key value and any errors. A NotFoundError is used to signal that no value was found. // This function will optimise the handling over listTags, if possible. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func GetTag(ctx context.Context, conn autoscalingiface.AutoScalingAPI, identifier, resourceType, key string) (*tftags.TagData, error) { +func findTag(ctx context.Context, conn *autoscaling.Client, identifier, resourceType, key string, optFns ...func(*autoscaling.Options)) (*tftags.TagData, error) { input := &autoscaling.DescribeTagsInput{ - Filters: []*autoscaling.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("auto-scaling-group"), - Values: []*string{aws.String(identifier)}, + Values: []string{identifier}, }, { Name: aws.String("key"), - Values: []*string{aws.String(key)}, + Values: []string{key}, }, }, } - output, err := conn.DescribeTagsWithContext(ctx, input) + output, err := conn.DescribeTags(ctx, input, optFns...) if err != nil { return nil, err @@ -55,33 +56,28 @@ func GetTag(ctx context.Context, conn autoscalingiface.AutoScalingAPI, identifie // listTags lists autoscaling service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn autoscalingiface.AutoScalingAPI, identifier, resourceType string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *autoscaling.Client, identifier, resourceType string, optFns ...func(*autoscaling.Options)) (tftags.KeyValueTags, error) { input := &autoscaling.DescribeTagsInput{ - Filters: []*autoscaling.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("auto-scaling-group"), - Values: []*string{aws.String(identifier)}, + Values: []string{identifier}, }, }, } - var output []*autoscaling.TagDescription + var output []awstypes.TagDescription - err := conn.DescribeTagsPagesWithContext(ctx, input, func(page *autoscaling.DescribeTagsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := autoscaling.NewDescribeTagsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx, optFns...) + + if err != nil { + return tftags.New(ctx, nil), err } for _, v := range page.Tags { - if v != nil { - output = append(output, v) - } + output = append(output, v) } - - return !lastPage - }) - - if err != nil { - return tftags.New(ctx, nil), err } return KeyValueTags(ctx, output, identifier, resourceType), nil @@ -90,7 +86,7 @@ func listTags(ctx context.Context, conn autoscalingiface.AutoScalingAPI, identif // ListTags lists autoscaling service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, resourceType string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).AutoScalingConn(ctx), identifier, resourceType) + tags, err := listTags(ctx, meta.(*conns.AWSClient).AutoScalingClient(ctx), identifier, resourceType) if err != nil { return err @@ -105,36 +101,28 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res // []*SERVICE.Tag handling -// ListOfMap returns a list of autoscaling in flattened map. +// listOfMap returns a list of autoscaling tags in a flattened map. // // Compatible with setting Terraform state for strongly typed configuration blocks. 
// // This function strips tag resource identifier and type. Generally, this is // the desired behavior so the tag schema does not require those attributes. -// Use (tftags.KeyValueTags).ListOfMap() for full tag information. -func ListOfMap(tags tftags.KeyValueTags) []any { - var result []any - - for _, key := range tags.Keys() { - m := map[string]any{ - "key": key, - "value": aws.StringValue(tags.KeyValue(key)), - - "propagate_at_launch": aws.BoolValue(tags.KeyAdditionalBoolValue(key, "PropagateAtLaunch")), +func listOfMap(tags tftags.KeyValueTags) []any { + return tfslices.ApplyToAll(tags.Keys(), func(key string) any { + return map[string]any{ + "key": key, + "value": aws.ToString(tags.KeyValue(key)), + "propagate_at_launch": aws.ToBool(tags.KeyAdditionalBoolValue(key, "PropagateAtLaunch")), } - - result = append(result, m) - } - - return result + }) } // Tags returns autoscaling service tags. -func Tags(tags tftags.KeyValueTags) []*autoscaling.Tag { - var result []*autoscaling.Tag +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + var result []awstypes.Tag for _, key := range tags.Keys() { - tag := &autoscaling.Tag{ + tag := awstypes.Tag{ Key: aws.String(key), Value: tags.KeyValue(key), ResourceId: tags.KeyAdditionalStringValue(key, "ResourceId"), @@ -151,13 +139,13 @@ func Tags(tags tftags.KeyValueTags) []*autoscaling.Tag { // KeyValueTags creates tftags.KeyValueTags from autoscaling service tags. // // Accepts the following types: -// - []*autoscaling.Tag -// - []*autoscaling.TagDescription +// - []awstypes.Tag +// - []awstypes.TagDescription // - []any (Terraform TypeList configuration block compatible) // - *schema.Set (Terraform TypeSet configuration block compatible) func KeyValueTags(ctx context.Context, tags any, identifier, resourceType string) tftags.KeyValueTags { switch tags := tags.(type) { - case []*autoscaling.Tag: + case []awstypes.Tag: m := make(map[string]*tftags.TagData, len(tags)) for _, tag := range tags { @@ -171,11 +159,11 @@ func KeyValueTags(ctx context.Context, tags any, identifier, resourceType string tagData.AdditionalStringFields["ResourceId"] = &identifier tagData.AdditionalStringFields["ResourceType"] = &resourceType - m[aws.StringValue(tag.Key)] = tagData + m[aws.ToString(tag.Key)] = tagData } return tftags.New(ctx, m) - case []*autoscaling.TagDescription: + case []awstypes.TagDescription: m := make(map[string]*tftags.TagData, len(tags)) for _, tag := range tags { @@ -188,7 +176,7 @@ func KeyValueTags(ctx context.Context, tags any, identifier, resourceType string tagData.AdditionalStringFields["ResourceId"] = &identifier tagData.AdditionalStringFields["ResourceType"] = &resourceType - m[aws.StringValue(tag.Key)] = tagData + m[aws.ToString(tag.Key)] = tagData } return tftags.New(ctx, m) @@ -236,7 +224,7 @@ func KeyValueTags(ctx context.Context, tags any, identifier, resourceType string // getTagsIn returns autoscaling service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*autoscaling.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -256,7 +244,7 @@ func setTagsOut(ctx context.Context, tags any, identifier, resourceType string) // updateTags updates autoscaling service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn autoscalingiface.AutoScalingAPI, identifier, resourceType string, oldTagsSet, newTagsSet any) error { +func updateTags(ctx context.Context, conn *autoscaling.Client, identifier, resourceType string, oldTagsSet, newTagsSet any, optFns ...func(*autoscaling.Options)) error { oldTags := KeyValueTags(ctx, oldTagsSet, identifier, resourceType) newTags := KeyValueTags(ctx, newTagsSet, identifier, resourceType) @@ -269,7 +257,7 @@ func updateTags(ctx context.Context, conn autoscalingiface.AutoScalingAPI, ident Tags: Tags(removedTags), } - _, err := conn.DeleteTagsWithContext(ctx, input) + _, err := conn.DeleteTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -283,7 +271,7 @@ func updateTags(ctx context.Context, conn autoscalingiface.AutoScalingAPI, ident Tags: Tags(updatedTags), } - _, err := conn.CreateOrUpdateTagsWithContext(ctx, input) + _, err := conn.CreateOrUpdateTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -296,5 +284,5 @@ func updateTags(ctx context.Context, conn autoscalingiface.AutoScalingAPI, ident // UpdateTags updates autoscaling service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier, resourceType string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).AutoScalingConn(ctx), identifier, resourceType, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).AutoScalingClient(ctx), identifier, resourceType, oldTags, newTags) } diff --git a/internal/service/autoscaling/traffic_source_attachment.go b/internal/service/autoscaling/traffic_source_attachment.go index d1d864ada55..bf57ae493a5 100644 --- a/internal/service/autoscaling/traffic_source_attachment.go +++ b/internal/service/autoscaling/traffic_source_attachment.go @@ -10,8 +10,10 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -22,8 +24,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_autoscaling_traffic_source_attachment") -func ResourceTrafficSourceAttachment() *schema.Resource { +// @SDKResource("aws_autoscaling_traffic_source_attachment", name="Traffic Source Attachment") +func resourceTrafficSourceAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTrafficSourceAttachmentCreate, ReadWithoutTimeout: resourceTrafficSourceAttachmentRead, @@ -68,19 +70,19 @@ func ResourceTrafficSourceAttachment() *schema.Resource { func resourceTrafficSourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) asgName := d.Get("autoscaling_group_name").(string) trafficSource := expandTrafficSourceIdentifier(d.Get("traffic_source").([]interface{})[0].(map[string]interface{})) - trafficSourceID := aws.StringValue(trafficSource.Identifier) - 
trafficSourceType := aws.StringValue(trafficSource.Type) + trafficSourceID := aws.ToString(trafficSource.Identifier) + trafficSourceType := aws.ToString(trafficSource.Type) id := trafficSourceAttachmentCreateResourceID(asgName, trafficSourceType, trafficSourceID) input := &autoscaling.AttachTrafficSourcesInput{ AutoScalingGroupName: aws.String(asgName), - TrafficSources: []*autoscaling.TrafficSourceIdentifier{trafficSource}, + TrafficSources: []awstypes.TrafficSourceIdentifier{trafficSource}, } - _, err := conn.AttachTrafficSourcesWithContext(ctx, input) + _, err := conn.AttachTrafficSources(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Auto Scaling Traffic Source Attachment (%s): %s", id, err) @@ -97,15 +99,14 @@ func resourceTrafficSourceAttachmentCreate(ctx context.Context, d *schema.Resour func resourceTrafficSourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) - - asgName, trafficSourceType, trafficSourceID, err := TrafficSourceAttachmentParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) + asgName, trafficSourceType, trafficSourceID, err := trafficSourceAttachmentParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - _, err = FindTrafficSourceAttachmentByThreePartKey(ctx, conn, asgName, trafficSourceType, trafficSourceID) + _, err = findTrafficSourceAttachmentByThreePartKey(ctx, conn, asgName, trafficSourceType, trafficSourceID) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Auto Scaling Traffic Source Attachment (%s) not found, removing from state", d.Id()) @@ -122,20 +123,25 @@ func resourceTrafficSourceAttachmentRead(ctx context.Context, d *schema.Resource func resourceTrafficSourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AutoScalingConn(ctx) - - asgName, trafficSourceType, trafficSourceID, err := TrafficSourceAttachmentParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).AutoScalingClient(ctx) + asgName, trafficSourceType, trafficSourceID, err := trafficSourceAttachmentParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } + trafficSource := expandTrafficSourceIdentifier(d.Get("traffic_source").([]interface{})[0].(map[string]interface{})) + log.Printf("[INFO] Deleting Auto Scaling Traffic Source Attachment: %s", d.Id()) - _, err = conn.DetachTrafficSourcesWithContext(ctx, &autoscaling.DetachTrafficSourcesInput{ + _, err = conn.DetachTrafficSources(ctx, &autoscaling.DetachTrafficSourcesInput{ AutoScalingGroupName: aws.String(asgName), - TrafficSources: []*autoscaling.TrafficSourceIdentifier{expandTrafficSourceIdentifier(d.Get("traffic_source").([]interface{})[0].(map[string]interface{}))}, + TrafficSources: []awstypes.TrafficSourceIdentifier{trafficSource}, }) + if tfawserr.ErrMessageContains(err, errCodeValidationError, "not found") { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Auto Scaling Traffic Source Attachment (%s): %s", d.Id(), err) } @@ -156,7 +162,7 @@ func trafficSourceAttachmentCreateResourceID(asgName, trafficSourceType, traffic return id } -func TrafficSourceAttachmentParseResourceID(id string) (string, string, string, error) { +func trafficSourceAttachmentParseResourceID(id string) (string, string, string, error) { parts := strings.Split(id, 
trafficSourceAttachmentIDSeparator) if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { @@ -166,7 +172,7 @@ func TrafficSourceAttachmentParseResourceID(id string) (string, string, string, return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected asg-name%[2]straffic-source-type%[2]straffic-source-id", id, trafficSourceAttachmentIDSeparator) } -func FindTrafficSourceAttachmentByThreePartKey(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType, trafficSourceID string) (*autoscaling.TrafficSourceState, error) { +func findTrafficSourceAttachmentByThreePartKey(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType, trafficSourceID string) (*awstypes.TrafficSourceState, error) { input := &autoscaling.DescribeTrafficSourcesInput{ AutoScalingGroupName: aws.String(asgName), TrafficSourceType: aws.String(trafficSourceType), @@ -178,16 +184,16 @@ func FindTrafficSourceAttachmentByThreePartKey(ctx context.Context, conn *autosc return nil, err } - output = slices.Filter(output, func(v *autoscaling.TrafficSourceState) bool { - return aws.StringValue(v.Identifier) == trafficSourceID + output = slices.Filter(output, func(v awstypes.TrafficSourceState) bool { + return aws.ToString(v.Identifier) == trafficSourceID }) - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func statusTrafficSourceAttachment(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType, trafficSourceID string) retry.StateRefreshFunc { +func statusTrafficSourceAttachment(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType, trafficSourceID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindTrafficSourceAttachmentByThreePartKey(ctx, conn, asgName, trafficSourceType, trafficSourceID) + output, err := findTrafficSourceAttachmentByThreePartKey(ctx, conn, asgName, trafficSourceType, trafficSourceID) if tfresource.NotFound(err) { return nil, "", nil @@ -197,11 +203,11 @@ func statusTrafficSourceAttachment(ctx context.Context, conn *autoscaling.AutoSc return nil, "", err } - return output, aws.StringValue(output.State), nil + return output, aws.ToString(output.State), nil } } -func waitTrafficSourceAttachmentCreated(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType, trafficSourceID string, timeout time.Duration) (*autoscaling.TrafficSourceState, error) { +func waitTrafficSourceAttachmentCreated(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType, trafficSourceID string, timeout time.Duration) (*awstypes.TrafficSourceState, error) { stateConf := &retry.StateChangeConf{ Pending: []string{TrafficSourceStateAdding}, Target: []string{TrafficSourceStateAdded, TrafficSourceStateInService}, @@ -211,14 +217,14 @@ func waitTrafficSourceAttachmentCreated(ctx context.Context, conn *autoscaling.A outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*autoscaling.TrafficSourceState); ok { + if output, ok := outputRaw.(*awstypes.TrafficSourceState); ok { return output, err } return nil, err } -func waitTrafficSourceAttachmentDeleted(ctx context.Context, conn *autoscaling.AutoScaling, asgName, trafficSourceType, trafficSourceID string, timeout time.Duration) (*autoscaling.TrafficSourceState, error) { +func waitTrafficSourceAttachmentDeleted(ctx context.Context, conn *autoscaling.Client, asgName, trafficSourceType, trafficSourceID string, timeout 
time.Duration) (*awstypes.TrafficSourceState, error) { stateConf := &retry.StateChangeConf{ Pending: []string{TrafficSourceStateRemoving, TrafficSourceStateRemoved}, Target: []string{}, @@ -228,7 +234,7 @@ func waitTrafficSourceAttachmentDeleted(ctx context.Context, conn *autoscaling.A outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*autoscaling.TrafficSourceState); ok { + if output, ok := outputRaw.(*awstypes.TrafficSourceState); ok { return output, err } diff --git a/internal/service/autoscaling/traffic_source_attachment_test.go b/internal/service/autoscaling/traffic_source_attachment_test.go index 7f11982b884..91cce0067a1 100644 --- a/internal/service/autoscaling/traffic_source_attachment_test.go +++ b/internal/service/autoscaling/traffic_source_attachment_test.go @@ -177,15 +177,9 @@ func testAccCheckTrafficSourceAttachmentExists(ctx context.Context, n string) re return fmt.Errorf("Not found: %s", n) } - asgName, trafficSourceType, trafficSourceID, err := tfautoscaling.TrafficSourceAttachmentParseResourceID(rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) - if err != nil { - return err - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) - - _, err = tfautoscaling.FindTrafficSourceAttachmentByThreePartKey(ctx, conn, asgName, trafficSourceType, trafficSourceID) + _, err := tfautoscaling.FindTrafficSourceAttachmentByThreePartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.Attributes["traffic_source.0.type"], rs.Primary.Attributes["traffic_source.0.identifier"]) return err } @@ -193,20 +187,14 @@ func testAccCheckTrafficSourceAttachmentExists(ctx context.Context, n string) re func testAccCheckTrafficSourceAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AutoScalingClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_autoscaling_traffic_source_attachment" { continue } - asgName, trafficSourceType, trafficSourceID, err := tfautoscaling.TrafficSourceAttachmentParseResourceID(rs.Primary.ID) - - if err != nil { - return err - } - - _, err = tfautoscaling.FindTrafficSourceAttachmentByThreePartKey(ctx, conn, asgName, trafficSourceType, trafficSourceID) + _, err := tfautoscaling.FindTrafficSourceAttachmentByThreePartKey(ctx, conn, rs.Primary.Attributes["autoscaling_group_name"], rs.Primary.Attributes["traffic_source.0.type"], rs.Primary.Attributes["traffic_source.0.identifier"]) if tfresource.NotFound(err) { continue diff --git a/internal/service/gamelift/game_server_group.go b/internal/service/gamelift/game_server_group.go index fc0ae888022..da18974594b 100644 --- a/internal/service/gamelift/game_server_group.go +++ b/internal/service/gamelift/game_server_group.go @@ -10,8 +10,9 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "strings" "time" + "github.com/aws/aws-sdk-go-v2/service/autoscaling" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/gamelift" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -259,7 +260,7 @@ func resourceGameServerGroupCreate(ctx context.Context, d *schema.ResourceData, func resourceGameServerGroupRead(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).GameLiftConn(ctx) - autoscalingConn := meta.(*conns.AWSClient).AutoScalingConn(ctx) + autoscalingConn := meta.(*conns.AWSClient).AutoScalingClient(ctx) gameServerGroupName := d.Id() @@ -275,8 +276,8 @@ func resourceGameServerGroupRead(ctx context.Context, d *schema.ResourceData, me } autoScalingGroupName := strings.Split(aws.StringValue(gameServerGroup.AutoScalingGroupArn), "/")[1] - autoScalingGroupOutput, err := autoscalingConn.DescribeAutoScalingGroupsWithContext(ctx, &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: []*string{aws.String(autoScalingGroupName)}, + autoScalingGroupOutput, err := autoscalingConn.DescribeAutoScalingGroups(ctx, &autoscaling.DescribeAutoScalingGroupsInput{ + AutoScalingGroupNames: []string{autoScalingGroupName}, }) if err != nil { return sdkdiag.AppendErrorf(diags, "reading GameLift Game Server Group (%s): reading AutoScaling Group: %s", gameServerGroupName, err) @@ -286,9 +287,9 @@ func resourceGameServerGroupRead(ctx context.Context, d *schema.ResourceData, me } autoScalingGroup := autoScalingGroupOutput.AutoScalingGroups[0] - describePoliciesOutput, err := autoscalingConn.DescribePoliciesWithContext(ctx, &autoscaling.DescribePoliciesInput{ + describePoliciesOutput, err := autoscalingConn.DescribePolicies(ctx, &autoscaling.DescribePoliciesInput{ AutoScalingGroupName: aws.String(autoScalingGroupName), - PolicyNames: []*string{aws.String(gameServerGroupName)}, + PolicyNames: []string{gameServerGroupName}, }) if err != nil { @@ -482,17 +483,13 @@ func expandTargetTrackingConfiguration(tfMap map[string]interface{}) *gamelift.T return apiObject } -func flattenGameServerGroupAutoScalingPolicy(apiObject *autoscaling.ScalingPolicy) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenGameServerGroupAutoScalingPolicy(apiObject autoscalingtypes.ScalingPolicy) map[string]interface{} { tfMap := map[string]interface{}{ "target_tracking_configuration": []interface{}{flattenTargetTrackingConfiguration(apiObject.TargetTrackingConfiguration)}, } if v := apiObject.EstimatedInstanceWarmup; v != nil { - tfMap["estimated_instance_warmup"] = aws.Int64Value(v) + tfMap["estimated_instance_warmup"] = aws.Int32Value(v) } return tfMap @@ -514,7 +511,7 @@ func flattenInstanceDefinition(apiObject *gamelift.InstanceDefinition) map[strin return tfMap } -func flattenAutoScalingLaunchTemplateSpecification(apiObject *autoscaling.LaunchTemplateSpecification) []map[string]interface{} { +func flattenAutoScalingLaunchTemplateSpecification(apiObject *autoscalingtypes.LaunchTemplateSpecification) []map[string]interface{} { if apiObject == nil { return nil } @@ -552,7 +549,7 @@ func flattenInstanceDefinitions(apiObjects []*gamelift.InstanceDefinition) []int return tfList } -func flattenTargetTrackingConfiguration(apiObject *autoscaling.TargetTrackingConfiguration) map[string]interface{} { +func flattenTargetTrackingConfiguration(apiObject *autoscalingtypes.TargetTrackingConfiguration) map[string]interface{} { if apiObject == nil { return nil } diff --git a/internal/tags/key_value_tags.go b/internal/tags/key_value_tags.go index 392d5fa6e8e..3cbeb8a030d 100644 --- a/internal/tags/key_value_tags.go +++ b/internal/tags/key_value_tags.go @@ -282,48 +282,6 @@ func (tags KeyValueTags) Keys() []string { return result } -// ListofMap returns a list of flattened tags. 
-// Compatible with setting Terraform state for strongly typed configuration blocks. -func (tags KeyValueTags) ListofMap() []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(tags)) - - for k, v := range tags { - m := map[string]interface{}{ - "key": k, - "value": "", - } - - if v == nil { - result = append(result, m) - continue - } - - if v.Value != nil { - m["value"] = *v.Value - } - - for k, v := range v.AdditionalBoolFields { - m[ToSnakeCase(k)] = false - - if v != nil { - m[ToSnakeCase(k)] = *v - } - } - - for k, v := range v.AdditionalStringFields { - m[ToSnakeCase(k)] = "" - - if v != nil { - m[ToSnakeCase(k)] = *v - } - } - - result = append(result, m) - } - - return result -} - // Map returns tag keys mapped to their values. func (tags KeyValueTags) Map() map[string]string { result := make(map[string]string, len(tags)) diff --git a/names/data/names_data.csv b/names/data/names_data.csv index c4971576db3..18616a0a6de 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -29,7 +29,7 @@ appsync,appsync,appsync,appsync,,appsync,,,AppSync,AppSync,,1,,,aws_appsync_,,ap ,,,,,,,,,,,,,,,,,Artifact,AWS,x,,,,,,,,,No SDK support athena,athena,athena,athena,,athena,,,Athena,Athena,,,2,,aws_athena_,,athena_,Athena,Amazon,,,,,,,Athena,ListDataCatalogs,, auditmanager,auditmanager,auditmanager,auditmanager,,auditmanager,,,AuditManager,AuditManager,,,2,,aws_auditmanager_,,auditmanager_,Audit Manager,AWS,,,,,,,AuditManager,GetAccountStatus,, -autoscaling,autoscaling,autoscaling,autoscaling,,autoscaling,,,AutoScaling,AutoScaling,,1,,aws_(autoscaling_|launch_configuration),aws_autoscaling_,,autoscaling_;launch_configuration,Auto Scaling,,,,,,,,Auto Scaling,DescribeAutoScalingGroups,, +autoscaling,autoscaling,autoscaling,autoscaling,,autoscaling,,,AutoScaling,AutoScaling,,,2,aws_(autoscaling_|launch_configuration),aws_autoscaling_,,autoscaling_;launch_configuration,Auto Scaling,,,,,,,,Auto Scaling,DescribeAutoScalingGroups,, autoscaling-plans,autoscalingplans,autoscalingplans,autoscalingplans,,autoscalingplans,,,AutoScalingPlans,AutoScalingPlans,,,2,,aws_autoscalingplans_,,autoscalingplans_,Auto Scaling Plans,,,,,,,,Auto Scaling Plans,DescribeScalingPlans,, ,,,,,,,,,,,,,,,,,Backint Agent for SAP HANA,AWS,x,,,,,,,,,No SDK support backup,backup,backup,backup,,backup,,,Backup,Backup,,1,,,aws_backup_,,backup_,Backup,AWS,,,,,,,Backup,ListBackupPlans,,
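The resourceSchedulePut hunk in schedule.go keeps the provider's convention that -1 means "leave this field out of the request" while narrowing min_size, max_size and desired_capacity from *int64 to *int32. Below is a minimal standalone sketch of that sentinel handling; it uses the real PutScheduledUpdateGroupActionInput type from the v2 SDK, but the configuration values and the setCapacity helper are illustrative assumptions, not provider code.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
)

// setCapacity copies a configured value into the request only when it is not
// the -1 "unset" sentinel, mirroring the min_size/max_size/desired_capacity
// handling in resourceSchedulePut. (Helper name is hypothetical.)
func setCapacity(dst **int32, configured int) {
	if v := int32(configured); v != -1 {
		*dst = aws.Int32(v)
	}
}

func main() {
	input := &autoscaling.PutScheduledUpdateGroupActionInput{}

	// Hypothetical configuration values; desired_capacity is left at the
	// sentinel, so the field stays nil and is omitted from the API call.
	setCapacity(&input.MinSize, 1)
	setCapacity(&input.MaxSize, 3)
	setCapacity(&input.DesiredCapacity, -1)

	fmt.Printf("min=%v max=%v desired set=%t\n",
		aws.ToInt32(input.MinSize), aws.ToInt32(input.MaxSize), input.DesiredCapacity != nil)
}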
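Several hunks above (findSchedules in schedule.go, sweepGroups and sweepLaunchConfigurations in sweep.go, listTags in tags_gen.go) replace the v1 "...PagesWithContext" callback style with AWS SDK for Go v2 paginators. A minimal sketch of that loop for DescribeScheduledActions follows; the group name, the default-config loading and the print/log handling are illustrative only and not part of the change.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
)

func main() {
	ctx := context.Background()

	// Load shared AWS configuration (region, credentials, etc.).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := autoscaling.NewFromConfig(cfg)

	// "example-asg" is a placeholder Auto Scaling group name.
	input := &autoscaling.DescribeScheduledActionsInput{
		AutoScalingGroupName: aws.String("example-asg"),
	}

	// The paginator drives the paging loop; each NextPage call issues one
	// DescribeScheduledActions request and returns a typed page.
	pages := autoscaling.NewDescribeScheduledActionsPaginator(client, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}

		for _, action := range page.ScheduledUpdateGroupActions {
			fmt.Println(aws.ToString(action.ScheduledActionName))
		}
	}
}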
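The NewClient function added in service_package_gen.go swaps the v1 session-based constructor for autoscaling.NewFromConfig with an optional BaseEndpoint override. A standalone sketch of the same pattern is below; the customEndpoint variable is a hypothetical stand-in for the provider's per-service endpoint configuration.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
)

func main() {
	ctx := context.Background()

	// Load shared AWS configuration.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical custom endpoint (e.g. a local test stack); leave empty to
	// use the default regional endpoint.
	customEndpoint := ""

	// Apply BaseEndpoint only when an override is configured, mirroring NewClient.
	client := autoscaling.NewFromConfig(cfg, func(o *autoscaling.Options) {
		if customEndpoint != "" {
			o.BaseEndpoint = aws.String(customEndpoint)
		}
	})

	_ = client
}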