diff --git a/.changelog/30493.txt b/.changelog/30493.txt
new file mode 100644
index 00000000000..b13a2ab9f87
--- /dev/null
+++ b/.changelog/30493.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_elasticache_replication_group: Fix `unexpected state 'snapshotting'` errors when increasing or decreasing replica count
+```
\ No newline at end of file
diff --git a/.changelog/34819.txt b/.changelog/34819.txt
new file mode 100644
index 00000000000..50a7e6db17f
--- /dev/null
+++ b/.changelog/34819.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+resource/aws_elasticache_replication_group: Decrease replica count after other updates
+```
\ No newline at end of file
diff --git a/.changelog/37182.txt b/.changelog/37182.txt
new file mode 100644
index 00000000000..6618fd8257a
--- /dev/null
+++ b/.changelog/37182.txt
@@ -0,0 +1,7 @@
+```release-note:enhancement
+resource/aws_elasticache_replication_group: Increase default Delete timeout to 45 minutes
+```
+
+```release-note:enhancement
+resource/aws_elasticache_replication_group: Use the configured Delete timeout when detaching from any global replication group
+```
\ No newline at end of file
diff --git a/internal/service/elasticache/cluster.go b/internal/service/elasticache/cluster.go
index a5e44868b69..215936fda58 100644
--- a/internal/service/elasticache/cluster.go
+++ b/internal/service/elasticache/cluster.go
@@ -27,6 +27,7 @@ import (
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
 	"github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
@@ -38,13 +39,9 @@ const (
 	defaultMemcachedPort = "11211"
 )
 
-const (
-	cacheClusterCreatedTimeout = 40 * time.Minute
-)
-
 // @SDKResource("aws_elasticache_cluster", name="Cluster")
 // @Tags(identifierAttribute="arn")
-func ResourceCluster() *schema.Resource {
+func resourceCluster() *schema.Resource {
 	return &schema.Resource{
 		CreateWithoutTimeout: resourceClusterCreate,
 		ReadWithoutTimeout:   resourceClusterRead,
@@ -337,12 +334,12 @@ func ResourceCluster() *schema.Resource {
 		},
 
 		CustomizeDiff: customdiff.Sequence(
-			CustomizeDiffValidateClusterAZMode,
-			CustomizeDiffValidateClusterEngineVersion,
+			customizeDiffValidateClusterAZMode,
+			customizeDiffValidateClusterEngineVersion,
 			customizeDiffEngineVersionForceNewOnDowngrade,
-			CustomizeDiffValidateClusterNumCacheNodes,
-			CustomizeDiffClusterMemcachedNodeType,
-			CustomizeDiffValidateClusterMemcachedSnapshotIdentifier,
+			customizeDiffValidateClusterNumCacheNodes,
+			customizeDiffClusterMemcachedNodeType,
+			customizeDiffValidateClusterMemcachedSnapshotIdentifier,
 			verify.SetTagsDiff,
 		),
 	}
@@ -475,7 +472,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int
 
 	d.SetId(id)
 
-	if _, err := waitCacheClusterAvailable(ctx, conn, d.Id(), cacheClusterCreatedTimeout); err != nil {
+	const (
+		timeout = 40 * time.Minute
+	)
+	if _, err := waitCacheClusterAvailable(ctx, conn, d.Id(), timeout); err != nil {
 		return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) create: %s", d.Id(), err)
 	}
 
@@ -500,12 +500,14 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter
 	var diags diag.Diagnostics
 	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
 
-	c, err := FindCacheClusterWithNodeInfoByID(ctx, conn, d.Id())
+	c, err := findCacheClusterWithNodeInfoByID(ctx, conn, d.Id())
+
 	if !d.IsNewResource() && tfresource.NotFound(err) {
 		log.Printf("[WARN] ElastiCache Cache Cluster (%s) not found, removing from state", d.Id())
 		d.SetId("")
 		return diags
 	}
+
 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): %s", d.Id(), err)
 	}
@@ -558,33 +560,6 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter
 	return diags
 }
 
-func setFromCacheCluster(d *schema.ResourceData, c *elasticache.CacheCluster) error {
-	d.Set("node_type", c.CacheNodeType)
-
-	d.Set("engine", c.Engine)
-	if aws.StringValue(c.Engine) == engineRedis {
-		if err := setEngineVersionRedis(d, c.EngineVersion); err != nil {
-			return err // nosemgrep:ci.bare-error-returns
-		}
-	} else {
-		setEngineVersionMemcached(d, c.EngineVersion)
-	}
-	d.Set("auto_minor_version_upgrade", strconv.FormatBool(aws.BoolValue(c.AutoMinorVersionUpgrade)))
-
-	d.Set("subnet_group_name", c.CacheSubnetGroupName)
-	if err := d.Set("security_group_ids", flattenSecurityGroupIDs(c.SecurityGroups)); err != nil {
-		return fmt.Errorf("setting security_group_ids: %w", err)
-	}
-
-	if c.CacheParameterGroup != nil {
-		d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName)
-	}
-
-	d.Set("maintenance_window", c.PreferredMaintenanceWindow)
-
-	return nil
-}
-
 func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
 	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
@@ -723,7 +698,10 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int
 			return sdkdiag.AppendErrorf(diags, "updating ElastiCache cluster (%s), error: %s", d.Id(), err)
 		}
 
-		_, err = waitCacheClusterAvailable(ctx, conn, d.Id(), CacheClusterUpdatedTimeout)
+		const (
+			timeout = 80 * time.Minute
+		)
+		_, err = waitCacheClusterAvailable(ctx, conn, d.Id(), timeout)
 		if err != nil {
 			return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) to update: %s", d.Id(), err)
 		}
@@ -733,48 +711,6 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int
 	return append(diags, resourceClusterRead(ctx, d, meta)...)
 }
 
-func getCacheNodesToRemove(oldNumberOfNodes int, cacheNodesToRemove int) []*string {
-	nodesIdsToRemove := []*string{}
-	for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- {
-		s := fmt.Sprintf("%04d", i)
-		nodesIdsToRemove = append(nodesIdsToRemove, &s)
-	}
-
-	return nodesIdsToRemove
-}
-
-func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error {
-	sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes))
-	copy(sortedCacheNodes, c.CacheNodes)
-	sort.Sort(byCacheNodeId(sortedCacheNodes))
-
-	cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes))
-
-	for _, node := range sortedCacheNodes {
-		if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil {
-			return fmt.Errorf("Unexpected nil pointer in: %s", node)
-		}
-		cacheNodeData = append(cacheNodeData, map[string]interface{}{
-			"id":                aws.StringValue(node.CacheNodeId),
-			"address":           aws.StringValue(node.Endpoint.Address),
-			"port":              aws.Int64Value(node.Endpoint.Port),
-			"availability_zone": aws.StringValue(node.CustomerAvailabilityZone),
-			"outpost_arn":       aws.StringValue(node.CustomerOutpostArn),
-		})
-	}
-
-	return d.Set("cache_nodes", cacheNodeData)
-}
-
-type byCacheNodeId []*elasticache.CacheNode
-
-func (b byCacheNodeId) Len() int      { return len(b) }
-func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-func (b byCacheNodeId) Less(i, j int) bool {
-	return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil &&
-		aws.StringValue(b[i].CacheNodeId) < aws.StringValue(b[j].CacheNodeId)
-}
-
 func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
 	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
@@ -787,7 +723,10 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int
 		}
 		return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Cache Cluster (%s): %s", d.Id(), err)
 	}
-	_, err = WaitCacheClusterDeleted(ctx, conn, d.Id(), CacheClusterDeletedTimeout)
+	const (
+		timeout = 40 * time.Minute
+	)
+	_, err = waitCacheClusterDeleted(ctx, conn, d.Id(), timeout)
 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) to be deleted: %s", d.Id(), err)
 	}
@@ -850,3 +789,208 @@ func DeleteCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cach
 	return err
 }
+
+func findCacheClusterByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) {
+	input := &elasticache.DescribeCacheClustersInput{
+		CacheClusterId: aws.String(id),
+	}
+
+	return findCacheCluster(ctx, conn, input, tfslices.PredicateTrue[*elasticache.CacheCluster]())
+}
+func findCacheClusterWithNodeInfoByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) {
+	input := &elasticache.DescribeCacheClustersInput{
+		CacheClusterId:    aws.String(id),
+		ShowCacheNodeInfo: aws.Bool(true),
+	}
+
+	return findCacheCluster(ctx, conn, input, tfslices.PredicateTrue[*elasticache.CacheCluster]())
+}
+
+func findCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheClustersInput, filter tfslices.Predicate[*elasticache.CacheCluster]) (*elasticache.CacheCluster, error) {
+	output, err := findCacheClusters(ctx, conn, input, filter)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return tfresource.AssertSinglePtrResult(output)
+}
+
+func findCacheClusters(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheClustersInput, filter tfslices.Predicate[*elasticache.CacheCluster]) ([]*elasticache.CacheCluster, error) {
+	var output []*elasticache.CacheCluster
+
+	err := conn.DescribeCacheClustersPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool {
+		if page == nil {
+			return !lastPage
+		}
+
+		for _, v := range page.CacheClusters {
+			if v != nil && filter(v) {
+				output = append(output, v)
+			}
+		}
+
+		return !lastPage
+	})
+
+	if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheClusterNotFoundFault) {
+		return nil, &retry.NotFoundError{
+			LastError:   err,
+			LastRequest: input,
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return output, nil
+}
+
+func statusCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string) retry.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		output, err := findCacheClusterByID(ctx, conn, cacheClusterID)
+
+		if tfresource.NotFound(err) {
+			return nil, "", nil
+		}
+
+		if err != nil {
+			return nil, "", err
+		}
+
+		return output, aws.StringValue(output.CacheClusterStatus), nil
+	}
+}
+
+const (
+	cacheClusterStatusAvailable             = "available"
+	cacheClusterStatusCreating              = "creating"
+	cacheClusterStatusDeleted               = "deleted"
+	cacheClusterStatusDeleting              = "deleting"
+	cacheClusterStatusIncompatibleNetwork   = "incompatible-network"
+	cacheClusterStatusModifying             = "modifying"
+	cacheClusterStatusRebootingClusterNodes = "rebooting cluster nodes"
+	cacheClusterStatusRestoreFailed         = "restore-failed"
+	cacheClusterStatusSnapshotting          = "snapshotting"
+)
+
+func waitCacheClusterAvailable(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { //nolint:unparam
+	stateConf := &retry.StateChangeConf{
+		Pending: []string{
+			cacheClusterStatusCreating,
+			cacheClusterStatusModifying,
+			cacheClusterStatusSnapshotting,
+			cacheClusterStatusRebootingClusterNodes,
+		},
+		Target:     []string{cacheClusterStatusAvailable},
+		Refresh:    statusCacheCluster(ctx, conn, cacheClusterID),
+		Timeout:    timeout,
+		MinTimeout: 10 * time.Second,
+		Delay:      30 * time.Second,
+	}
+
+	outputRaw, err := stateConf.WaitForStateContext(ctx)
+
+	if output, ok := outputRaw.(*elasticache.CacheCluster); ok {
+		return output, err
+	}
+
+	return nil, err
+}
+
+func waitCacheClusterDeleted(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) {
+	stateConf := &retry.StateChangeConf{
+		Pending: []string{
+			cacheClusterStatusCreating,
+			cacheClusterStatusAvailable,
+			cacheClusterStatusModifying,
+			cacheClusterStatusDeleting,
+			cacheClusterStatusIncompatibleNetwork,
+			cacheClusterStatusRestoreFailed,
+			cacheClusterStatusSnapshotting,
+		},
+		Target:     []string{},
+		Refresh:    statusCacheCluster(ctx, conn, cacheClusterID),
+		Timeout:    timeout,
+		MinTimeout: 10 * time.Second,
+		Delay:      30 * time.Second,
+	}
+
+	outputRaw, err := stateConf.WaitForStateContext(ctx)
+
+	if output, ok := outputRaw.(*elasticache.CacheCluster); ok {
+		return output, err
+	}
+
+	return nil, err
+}
+
+func getCacheNodesToRemove(oldNumberOfNodes int, cacheNodesToRemove int) []*string {
+	nodesIdsToRemove := []*string{}
+	for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- {
+		s := fmt.Sprintf("%04d", i)
+		nodesIdsToRemove = append(nodesIdsToRemove, &s)
+	}
+
+	return nodesIdsToRemove
+}
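+
+// setCacheNodeData sorts the cluster's cache nodes by node ID and stores each
+// node's id, address, port, availability zone, and outpost ARN in the
+// "cache_nodes" attribute, returning an error if any endpoint data is missing.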
+func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error {
+	sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes))
+	copy(sortedCacheNodes, c.CacheNodes)
+	sort.Sort(byCacheNodeId(sortedCacheNodes))
+
+	cacheNodeData := make([]map[string]interface{}, 0, len(sortedCacheNodes))
+
+	for _, node := range sortedCacheNodes {
+		if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil {
+			return fmt.Errorf("Unexpected nil pointer in: %s", node)
+		}
+		cacheNodeData = append(cacheNodeData, map[string]interface{}{
+			"id":                aws.StringValue(node.CacheNodeId),
+			"address":           aws.StringValue(node.Endpoint.Address),
+			"port":              aws.Int64Value(node.Endpoint.Port),
+			"availability_zone": aws.StringValue(node.CustomerAvailabilityZone),
+			"outpost_arn":       aws.StringValue(node.CustomerOutpostArn),
+		})
+	}
+
+	return d.Set("cache_nodes", cacheNodeData)
+}
+
+type byCacheNodeId []*elasticache.CacheNode
+
+func (b byCacheNodeId) Len() int      { return len(b) }
+func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byCacheNodeId) Less(i, j int) bool {
+	return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil &&
+		aws.StringValue(b[i].CacheNodeId) < aws.StringValue(b[j].CacheNodeId)
+}
+
+func setFromCacheCluster(d *schema.ResourceData, c *elasticache.CacheCluster) error {
+	d.Set("node_type", c.CacheNodeType)
+
+	d.Set("engine", c.Engine)
+	if aws.StringValue(c.Engine) == engineRedis {
+		if err := setEngineVersionRedis(d, c.EngineVersion); err != nil {
+			return err // nosemgrep:ci.bare-error-returns
+		}
+	} else {
+		setEngineVersionMemcached(d, c.EngineVersion)
+	}
+	d.Set("auto_minor_version_upgrade", strconv.FormatBool(aws.BoolValue(c.AutoMinorVersionUpgrade)))
+
+	d.Set("subnet_group_name", c.CacheSubnetGroupName)
+	if err := d.Set("security_group_ids", flattenSecurityGroupIDs(c.SecurityGroups)); err != nil {
+		return fmt.Errorf("setting security_group_ids: %w", err)
+	}
+
+	if c.CacheParameterGroup != nil {
+		d.Set("parameter_group_name", c.CacheParameterGroup.CacheParameterGroupName)
+	}
+
+	d.Set("maintenance_window", c.PreferredMaintenanceWindow)
+
+	return nil
+}
diff --git a/internal/service/elasticache/cluster_data_source.go b/internal/service/elasticache/cluster_data_source.go
index 1ed16733b24..691eae4b1a5 100644
--- a/internal/service/elasticache/cluster_data_source.go
+++ b/internal/service/elasticache/cluster_data_source.go
@@ -19,8 +19,8 @@ import (
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 )
 
-// @SDKDataSource("aws_elasticache_cluster")
-func DataSourceCluster() *schema.Resource {
+// @SDKDataSource("aws_elasticache_cluster", name="Cluster")
+func dataSourceCluster() *schema.Resource {
 	return &schema.Resource{
 		ReadWithoutTimeout: dataSourceClusterRead,
 
@@ -177,57 +177,49 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int
 	ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig
 
 	clusterID := d.Get("cluster_id").(string)
-	cluster, err := FindCacheClusterWithNodeInfoByID(ctx, conn, clusterID)
-	if tfresource.NotFound(err) {
-		return sdkdiag.AppendErrorf(diags, "Your query returned no results. Please change your search criteria and try again")
-	}
+	cluster, err := findCacheClusterWithNodeInfoByID(ctx, conn, clusterID)
+
 	if err != nil {
-		return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): %s", clusterID, err)
+		return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Cluster", err))
 	}
 
 	d.SetId(aws.StringValue(cluster.CacheClusterId))
-
+	d.Set("arn", cluster.ARN)
+	d.Set("availability_zone", cluster.PreferredAvailabilityZone)
+	if cluster.ConfigurationEndpoint != nil {
+		clusterAddress, port := aws.StringValue(cluster.ConfigurationEndpoint.Address), aws.Int64Value(cluster.ConfigurationEndpoint.Port)
+		d.Set("cluster_address", clusterAddress)
+		d.Set("configuration_endpoint", fmt.Sprintf("%s:%d", clusterAddress, port))
+		d.Set("port", port)
+	}
 	d.Set("cluster_id", cluster.CacheClusterId)
-	d.Set("node_type", cluster.CacheNodeType)
-	d.Set("num_cache_nodes", cluster.NumCacheNodes)
-	d.Set("subnet_group_name", cluster.CacheSubnetGroupName)
 	d.Set("engine", cluster.Engine)
 	d.Set("engine_version", cluster.EngineVersion)
 	d.Set("ip_discovery", cluster.IpDiscovery)
-	d.Set("network_type", cluster.NetworkType)
-	d.Set("preferred_outpost_arn", cluster.PreferredOutpostArn)
-	d.Set("security_group_ids", flattenSecurityGroupIDs(cluster.SecurityGroups))
-
-	if cluster.CacheParameterGroup != nil {
-		d.Set("parameter_group_name", cluster.CacheParameterGroup.CacheParameterGroupName)
-	}
-
-	d.Set("replication_group_id", cluster.ReplicationGroupId)
-
 	d.Set("log_delivery_configuration", flattenLogDeliveryConfigurations(cluster.LogDeliveryConfigurations))
 	d.Set("maintenance_window", cluster.PreferredMaintenanceWindow)
-	d.Set("snapshot_window", cluster.SnapshotWindow)
-	d.Set("snapshot_retention_limit", cluster.SnapshotRetentionLimit)
-	d.Set("availability_zone", cluster.PreferredAvailabilityZone)
-
+	d.Set("network_type", cluster.NetworkType)
+	d.Set("node_type", cluster.CacheNodeType)
 	if cluster.NotificationConfiguration != nil {
 		if aws.StringValue(cluster.NotificationConfiguration.TopicStatus) == "active" {
 			d.Set("notification_topic_arn", cluster.NotificationConfiguration.TopicArn)
 		}
 	}
-
-	if cluster.ConfigurationEndpoint != nil {
-		d.Set("port", cluster.ConfigurationEndpoint.Port)
-		d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", *cluster.ConfigurationEndpoint.Address, *cluster.ConfigurationEndpoint.Port)))
-		d.Set("cluster_address", aws.String(*cluster.ConfigurationEndpoint.Address))
+	d.Set("num_cache_nodes", cluster.NumCacheNodes)
+	if cluster.CacheParameterGroup != nil {
+		d.Set("parameter_group_name", cluster.CacheParameterGroup.CacheParameterGroupName)
 	}
+	d.Set("preferred_outpost_arn", cluster.PreferredOutpostArn)
+	d.Set("replication_group_id", cluster.ReplicationGroupId)
+	d.Set("security_group_ids", flattenSecurityGroupIDs(cluster.SecurityGroups))
+	d.Set("snapshot_retention_limit", cluster.SnapshotRetentionLimit)
+	d.Set("snapshot_window", cluster.SnapshotWindow)
+	d.Set("subnet_group_name", cluster.CacheSubnetGroupName)
 
 	if err := setCacheNodeData(d, cluster); err != nil {
-		return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): %s", clusterID, err)
+		return sdkdiag.AppendErrorf(diags, "setting cache_nodes: %s", err)
 	}
 
-	d.Set("arn", cluster.ARN)
-
 	tags, err := listTags(ctx, conn, aws.StringValue(cluster.ARN))
 
 	if err != nil && !errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) {
diff --git a/internal/service/elasticache/cluster_test.go b/internal/service/elasticache/cluster_test.go
index a808cd0d034..a724b93c7dc 100644
--- a/internal/service/elasticache/cluster_test.go
+++ b/internal/service/elasticache/cluster_test.go
@@ -117,6 +117,34 @@ func TestAccElastiCacheCluster_Engine_redis(t *testing.T) {
 	})
 }
 
+func TestAccElastiCacheCluster_disappears(t *testing.T) {
+	ctx := acctest.Context(t)
+	if testing.Short() {
+		t.Skip("skipping long-running test in short mode")
+	}
+
+	var ec elasticache.CacheCluster
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+	resourceName := "aws_elasticache_cluster.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
+		ErrorCheck:               acctest.ErrorCheck(t, names.ElastiCacheServiceID),
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
+		CheckDestroy:             testAccCheckClusterDestroy(ctx),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccClusterConfig_engineRedis(rName),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckClusterExists(ctx, resourceName, &ec),
+					acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelasticache.ResourceCluster(), resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
 func TestAccElastiCacheCluster_Engine_redis_v5(t *testing.T) {
 	ctx := acctest.Context(t)
 	if testing.Short() {
diff --git a/internal/service/elasticache/diff.go b/internal/service/elasticache/diff.go
index dfedae91963..1c815e71e07 100644
--- a/internal/service/elasticache/diff.go
+++ b/internal/service/elasticache/diff.go
@@ -11,8 +11,8 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
-// CustomizeDiffValidateClusterAZMode validates that `num_cache_nodes` is greater than 1 when `az_mode` is "cross-az"
-func CustomizeDiffValidateClusterAZMode(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
+// customizeDiffValidateClusterAZMode validates that `num_cache_nodes` is greater than 1 when `az_mode` is "cross-az"
+func customizeDiffValidateClusterAZMode(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
 	if v, ok := diff.GetOk("az_mode"); !ok || v.(string) != elasticache.AZModeCrossAz {
 		return nil
 	}
@@ -23,8 +23,8 @@ func CustomizeDiffValidateClusterAZMode(_ context.Context, diff *schema.Resource
 	return errors.New(`az_mode "cross-az" is not supported with num_cache_nodes = 1`)
 }
 
-// CustomizeDiffValidateClusterNumCacheNodes validates that `num_cache_nodes` is 1 when `engine` is "redis"
-func CustomizeDiffValidateClusterNumCacheNodes(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
+// customizeDiffValidateClusterNumCacheNodes validates that `num_cache_nodes` is 1 when `engine` is "redis"
+func customizeDiffValidateClusterNumCacheNodes(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
 	if v, ok := diff.GetOk("engine"); !ok || v.(string) == engineMemcached {
 		return nil
 	}
@@ -35,8 +35,8 @@ func CustomizeDiffValidateClusterNumCacheNodes(_ context.Context, diff *schema.R
 	return errors.New(`engine "redis" does not support num_cache_nodes > 1`)
 }
 
-// CustomizeDiffClusterMemcachedNodeType causes re-creation when `node_type` is changed and `engine` is "memcached"
-func CustomizeDiffClusterMemcachedNodeType(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
+// customizeDiffClusterMemcachedNodeType causes re-creation when `node_type` is changed and `engine` is "memcached"
+func customizeDiffClusterMemcachedNodeType(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
 	// Engine memcached does not currently support vertical scaling
 	// https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Scaling.html#Scaling.Memcached.Vertically
 	if diff.Id() == "" || !diff.HasChange("node_type") {
@@ -48,8 +48,8 @@ func CustomizeDiffClusterMemcachedNodeType(_ context.Context, diff *schema.Resou
 	return diff.ForceNew("node_type")
 }
 
-// CustomizeDiffValidateClusterMemcachedSnapshotIdentifier validates that `final_snapshot_identifier` is not set when `engine` is "memcached"
-func CustomizeDiffValidateClusterMemcachedSnapshotIdentifier(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
+// customizeDiffValidateClusterMemcachedSnapshotIdentifier validates that `final_snapshot_identifier` is not set when `engine` is "memcached"
+func customizeDiffValidateClusterMemcachedSnapshotIdentifier(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
 	if v, ok := diff.GetOk("engine"); !ok || v.(string) == engineRedis {
 		return nil
 	}
@@ -59,8 +59,8 @@ func CustomizeDiffValidateClusterMemcachedSnapshotIdentifier(_ context.Context, 
 	return errors.New(`engine "memcached" does not support final_snapshot_identifier`)
 }
 
-// CustomizeDiffValidateReplicationGroupAutomaticFailover validates that `automatic_failover_enabled` is set when `multi_az_enabled` is true
-func CustomizeDiffValidateReplicationGroupAutomaticFailover(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
+// customizeDiffValidateReplicationGroupAutomaticFailover validates that `automatic_failover_enabled` is set when `multi_az_enabled` is true
+func customizeDiffValidateReplicationGroupAutomaticFailover(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
 	if v := diff.Get("multi_az_enabled").(bool); !v {
 		return nil
 	}
diff --git a/internal/service/elasticache/engine_version.go b/internal/service/elasticache/engine_version.go
index 732c16f9d14..7818b2b1ebc 100644
--- a/internal/service/elasticache/engine_version.go
+++ b/internal/service/elasticache/engine_version.go
@@ -55,8 +55,8 @@ func validRedisVersionString(v any, k string) (ws []string, errors []error) {
 	return
 }
 
-// CustomizeDiffValidateClusterEngineVersion validates the correct format for `engine_version`, based on `engine`
-func CustomizeDiffValidateClusterEngineVersion(_ context.Context, diff *schema.ResourceDiff, _ any) error {
+// customizeDiffValidateClusterEngineVersion validates the correct format for `engine_version`, based on `engine`
+func customizeDiffValidateClusterEngineVersion(_ context.Context, diff *schema.ResourceDiff, _ any) error {
 	engineVersion, ok := diff.GetOk("engine_version")
 	if !ok {
 		return nil
diff --git a/internal/service/elasticache/engine_version_test.go b/internal/service/elasticache/engine_version_test.go
index f5a5aa88f0a..79619c662c0 100644
--- a/internal/service/elasticache/engine_version_test.go
+++ b/internal/service/elasticache/engine_version_test.go
@@ -775,10 +775,18 @@ func (d *mockChangesDiffer) Get(key string) any {
 	return d.values[key].Get()
 }
 
+func (d *mockChangesDiffer) GetOk(string) (any, bool) {
+	return nil, false
+}
+
 func (d *mockChangesDiffer) HasChange(key string) bool {
 	return d.values[key].HasChange()
 }
 
+func (d *mockChangesDiffer) HasChanges(...string) bool {
+	return false
+}
+
 func (d *mockChangesDiffer) GetChange(key string) (any, any) {
 	return d.values[key].GetChange()
 }
diff --git a/internal/service/elasticache/errors.go b/internal/service/elasticache/errors.go
new file mode 100644
index 00000000000..73bac0dce23
--- /dev/null
+++ b/internal/service/elasticache/errors.go
@@ -0,0 +1,8 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package elasticache
+
+const (
+	errCodeDependencyViolation = "DependencyViolation"
+)
diff --git a/internal/service/elasticache/exports_test.go b/internal/service/elasticache/exports_test.go
index d269d88c87b..396d7a86ea7 100644
--- a/internal/service/elasticache/exports_test.go
+++ b/internal/service/elasticache/exports_test.go
@@ -5,10 +5,28 @@ package elasticache
 
 // Exports for use in tests only.
 var (
-	ResourceServerlessCache = newServerlessCacheResource
-	ResourceSubnetGroup     = resourceSubnetGroup
+	ResourceCluster                = resourceCluster
+	ResourceGlobalReplicationGroup = resourceGlobalReplicationGroup
+	ResourceParameterGroup         = resourceParameterGroup
+	ResourceReplicationGroup       = resourceReplicationGroup
+	ResourceServerlessCache        = newServerlessCacheResource
+	ResourceSubnetGroup            = resourceSubnetGroup
+	ResourceUser                   = resourceUser
+	ResourceUserGroup              = resourceUserGroup
+	ResourceUserGroupAssociation   = resourceUserGroupAssociation
 
-	FindCacheSubnetGroupByName = findCacheSubnetGroupByName
-
-	ReplicationGroupAvailableModifyDelay = replicationGroupAvailableModifyDelay
+	FindCacheClusterByID                 = findCacheClusterByID
+	FindCacheParameterGroup              = findCacheParameterGroup
+	FindCacheParameterGroupByName        = findCacheParameterGroupByName
+	FindCacheSubnetGroupByName           = findCacheSubnetGroupByName
+	FindGlobalReplicationGroupByID       = findGlobalReplicationGroupByID
+	FindReplicationGroupByID             = findReplicationGroupByID
+	FindServerlessCacheByID              = findServerlessCacheByID
+	FindUserByID                         = findUserByID
+	FindUserGroupByID                    = findUserGroupByID
+	FindUserGroupAssociationByTwoPartKey = findUserGroupAssociationByTwoPartKey
+	ParameterChanges                     = parameterChanges
+	ParameterHash                        = parameterHash
+	WaitCacheClusterDeleted              = waitCacheClusterDeleted
+	WaitReplicationGroupAvailable        = waitReplicationGroupAvailable
 )
diff --git a/internal/service/elasticache/find.go b/internal/service/elasticache/find.go
deleted file mode 100644
index dc6556f8f0d..00000000000
--- a/internal/service/elasticache/find.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package elasticache
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	elasticache_v2 "github.com/aws/aws-sdk-go-v2/service/elasticache"
-	awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types"
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/elasticache"
-	"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
-	"github.com/hashicorp/terraform-provider-aws/internal/errs"
-	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
-)
-
-// FindReplicationGroupByID retrieves an ElastiCache Replication Group by id.
-func FindReplicationGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.ReplicationGroup, error) {
-	input := &elasticache.DescribeReplicationGroupsInput{
-		ReplicationGroupId: aws.String(id),
-	}
-	output, err := conn.DescribeReplicationGroupsWithContext(ctx, input)
-	if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	if output == nil || len(output.ReplicationGroups) == 0 || output.ReplicationGroups[0] == nil {
-		return nil, &retry.NotFoundError{
-			Message:     "empty result",
-			LastRequest: input,
-		}
-	}
-
-	return output.ReplicationGroups[0], nil
-}
-
-// FindReplicationGroupMemberClustersByID retrieves all of an ElastiCache Replication Group's MemberClusters by the id of the Replication Group.
-func FindReplicationGroupMemberClustersByID(ctx context.Context, conn *elasticache.ElastiCache, id string) ([]*elasticache.CacheCluster, error) {
-	rg, err := FindReplicationGroupByID(ctx, conn, id)
-	if err != nil {
-		return nil, err
-	}
-
-	clusters, err := FindCacheClustersByID(ctx, conn, aws.StringValueSlice(rg.MemberClusters))
-	if err != nil {
-		return clusters, err
-	}
-	if len(clusters) == 0 {
-		return clusters, &retry.NotFoundError{
-			Message: fmt.Sprintf("No Member Clusters found in Replication Group (%s)", id),
-		}
-	}
-
-	return clusters, nil
-}
-
-// FindCacheClusterByID retrieves an ElastiCache Cache Cluster by id.
-func FindCacheClusterByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) {
-	input := &elasticache.DescribeCacheClustersInput{
-		CacheClusterId: aws.String(id),
-	}
-	return FindCacheCluster(ctx, conn, input)
-}
-
-// FindServerlessCacheByID retrieves an ElastiCache Cache Cluster by id.
-func FindServerlessCacheByID(ctx context.Context, conn *elasticache_v2.Client, id string) (awstypes.ServerlessCache, error) {
-	input := &elasticache_v2.DescribeServerlessCachesInput{
-		ServerlessCacheName: aws.String(id),
-	}
-
-	return FindServerlessCacheCluster(ctx, conn, input)
-}
-
-// FindCacheClusterWithNodeInfoByID retrieves an ElastiCache Cache Cluster with Node Info by id.
-func FindCacheClusterWithNodeInfoByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) {
-	input := &elasticache.DescribeCacheClustersInput{
-		CacheClusterId:    aws.String(id),
-		ShowCacheNodeInfo: aws.Bool(true),
-	}
-	return FindCacheCluster(ctx, conn, input)
-}
-
-// FindCacheCluster retrieves an ElastiCache Cache Cluster using DescribeCacheClustersInput.
-func FindCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheClustersInput) (*elasticache.CacheCluster, error) {
-	result, err := conn.DescribeCacheClustersWithContext(ctx, input)
-	if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheClusterNotFoundFault) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	if result == nil || len(result.CacheClusters) == 0 || result.CacheClusters[0] == nil {
-		return nil, &retry.NotFoundError{
-			Message:     "empty result",
-			LastRequest: input,
-		}
-	}
-
-	return result.CacheClusters[0], nil
-}
-
-// FindServerlessChache retrieves an ElastiCache Cache Cluster using DescribeCacheClustersInput.
-func FindServerlessCacheCluster(ctx context.Context, conn *elasticache_v2.Client, input *elasticache_v2.DescribeServerlessCachesInput) (awstypes.ServerlessCache, error) {
-	result, err := conn.DescribeServerlessCaches(ctx, input)
-
-	if errs.IsA[*awstypes.ServerlessCacheNotFoundFault](err) {
-		return awstypes.ServerlessCache{}, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
-		}
-	}
-
-	if err != nil {
-		return awstypes.ServerlessCache{}, err
-	}
-
-	if result == nil || len(result.ServerlessCaches) == 0 {
-		return awstypes.ServerlessCache{}, tfresource.NewEmptyResultError(input)
-	}
-
-	return result.ServerlessCaches[0], nil
-}
-
-// FindCacheClustersByID retrieves a list of ElastiCache Cache Clusters by id.
-// Order of the clusters is not guaranteed.
-func FindCacheClustersByID(ctx context.Context, conn *elasticache.ElastiCache, idList []string) ([]*elasticache.CacheCluster, error) {
-	var results []*elasticache.CacheCluster
-	ids := make(map[string]bool)
-	for _, v := range idList {
-		ids[v] = true
-	}
-
-	input := &elasticache.DescribeCacheClustersInput{}
-	err := conn.DescribeCacheClustersPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheClustersOutput, _ bool) bool {
-		if page == nil || page.CacheClusters == nil || len(page.CacheClusters) == 0 {
-			return true
-		}
-
-		for _, v := range page.CacheClusters {
-			if ids[aws.StringValue(v.CacheClusterId)] {
-				results = append(results, v)
-				delete(ids, aws.StringValue(v.CacheClusterId))
-				if len(ids) == 0 {
-					break
-				}
-			}
-		}
-
-		return len(ids) != 0
-	})
-
-	return results, err
-}
-
-// FindGlobalReplicationGroupByID retrieves an ElastiCache Global Replication Group by id.
-func FindGlobalReplicationGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.GlobalReplicationGroup, error) {
-	input := &elasticache.DescribeGlobalReplicationGroupsInput{
-		GlobalReplicationGroupId: aws.String(id),
-		ShowMemberInfo:           aws.Bool(true),
-	}
-	output, err := conn.DescribeGlobalReplicationGroupsWithContext(ctx, input)
-	if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
-		}
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	if output == nil || len(output.GlobalReplicationGroups) == 0 || output.GlobalReplicationGroups[0] == nil {
-		return nil, &retry.NotFoundError{
-			Message:     "empty result",
-			LastRequest: input,
-		}
-	}
-
-	return output.GlobalReplicationGroups[0], nil
-}
-
-// FindGlobalReplicationGroupMemberByID retrieves a member Replication Group by id from a Global Replication Group.
-func FindGlobalReplicationGroupMemberByID(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, id string) (*elasticache.GlobalReplicationGroupMember, error) {
-	globalReplicationGroup, err := FindGlobalReplicationGroupByID(ctx, conn, globalReplicationGroupID)
-	if err != nil {
-		return nil, &retry.NotFoundError{
-			Message:   "unable to retrieve enclosing Global Replication Group",
-			LastError: err,
-		}
-	}
-
-	if globalReplicationGroup == nil || len(globalReplicationGroup.Members) == 0 {
-		return nil, &retry.NotFoundError{
-			Message: "empty result",
-		}
-	}
-
-	for _, member := range globalReplicationGroup.Members {
-		if aws.StringValue(member.ReplicationGroupId) == id {
-			return member, nil
-		}
-	}
-
-	return nil, &retry.NotFoundError{
-		Message: fmt.Sprintf("Replication Group (%s) not found in Global Replication Group (%s)", id, globalReplicationGroupID),
-	}
-}
-
-func FindParameterGroupByName(ctx context.Context, conn *elasticache.ElastiCache, name string) (*elasticache.CacheParameterGroup, error) {
-	input := elasticache.DescribeCacheParameterGroupsInput{
-		CacheParameterGroupName: aws.String(name),
-	}
-
-	output, err := conn.DescribeCacheParameterGroupsWithContext(ctx, &input)
-
-	if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
-		}
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	if output == nil {
-		return nil, tfresource.NewEmptyResultError(input)
-	}
-
-	return tfresource.AssertSinglePtrResult(output.CacheParameterGroups)
-}
-
-type redisParameterGroupFilter func(group *elasticache.CacheParameterGroup) bool
-
-func FindParameterGroupByFilter(ctx context.Context, conn *elasticache.ElastiCache, filters ...redisParameterGroupFilter) (*elasticache.CacheParameterGroup, error) {
-	parameterGroups, err := ListParameterGroups(ctx, conn, filters...)
-	if err != nil {
-		return nil, err
-	}
-
-	switch count := len(parameterGroups); count {
-	case 0:
-		return nil, tfresource.NewEmptyResultError(nil)
-	case 1:
-		return parameterGroups[0], nil
-	default:
-		return nil, tfresource.NewTooManyResultsError(count, nil)
-	}
-}
-
-func ListParameterGroups(ctx context.Context, conn *elasticache.ElastiCache, filters ...redisParameterGroupFilter) ([]*elasticache.CacheParameterGroup, error) {
-	var parameterGroups []*elasticache.CacheParameterGroup
-	err := conn.DescribeCacheParameterGroupsPagesWithContext(ctx, &elasticache.DescribeCacheParameterGroupsInput{}, func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool {
-	PARAM_GROUPS:
-		for _, parameterGroup := range page.CacheParameterGroups {
-			for _, filter := range filters {
-				if !filter(parameterGroup) {
-					continue PARAM_GROUPS
-				}
-			}
-			parameterGroups = append(parameterGroups, parameterGroup)
-		}
-		return !lastPage
-	})
-	return parameterGroups, err
-}
-
-func FilterRedisParameterGroupFamily(familyName string) redisParameterGroupFilter {
-	return func(group *elasticache.CacheParameterGroup) bool {
-		return aws.StringValue(group.CacheParameterGroupFamily) == familyName
-	}
-}
-
-func FilterRedisParameterGroupNameDefault(group *elasticache.CacheParameterGroup) bool {
-	name := aws.StringValue(group.CacheParameterGroupName)
-	if strings.HasPrefix(name, "default.") && !strings.HasSuffix(name, ".cluster.on") {
-		return true
-	}
-	return false
-}
-
-func FilterRedisParameterGroupNameClusterEnabledDefault(group *elasticache.CacheParameterGroup) bool {
-	name := aws.StringValue(group.CacheParameterGroupName)
-	if strings.HasPrefix(name, "default.") && strings.HasSuffix(name, ".cluster.on") {
-		return true
-	}
-	return false
-}
diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go
index 14738fd4828..89cc11560f7 100644
--- a/internal/service/elasticache/global_replication_group.go
+++ b/internal/service/elasticache/global_replication_group.go
@@ -25,6 +25,8 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
+	"github.com/hashicorp/terraform-provider-aws/internal/sdkv2"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 )
 
@@ -37,17 +39,18 @@ const (
 )
 
 const (
-	GlobalReplicationGroupMemberRolePrimary   = "PRIMARY"
-	GlobalReplicationGroupMemberRoleSecondary = "SECONDARY"
+	globalReplicationGroupMemberRolePrimary   = "PRIMARY"
+	globalReplicationGroupMemberRoleSecondary = "SECONDARY"
 )
 
-// @SDKResource("aws_elasticache_global_replication_group")
-func ResourceGlobalReplicationGroup() *schema.Resource {
+// @SDKResource("aws_elasticache_global_replication_group", name="Global Replication Group")
+func resourceGlobalReplicationGroup() *schema.Resource {
 	return &schema.Resource{
 		CreateWithoutTimeout: resourceGlobalReplicationGroupCreate,
 		ReadWithoutTimeout:   resourceGlobalReplicationGroupRead,
 		UpdateWithoutTimeout: resourceGlobalReplicationGroupUpdate,
 		DeleteWithoutTimeout: resourceGlobalReplicationGroupDelete,
+
 		Importer: &schema.ResourceImporter{
 			StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
 				re := regexache.MustCompile("^" + GlobalReplicationGroupRegionPrefixFormat)
@@ -134,10 +137,21 @@ func ResourceGlobalReplicationGroup() *schema.Resource {
 				ForceNew: true,
 			},
 			"global_replication_group_description": {
-				Type:             schema.TypeString,
-				Optional:         true,
-				DiffSuppressFunc: descriptionDiffSuppress,
-				StateFunc:        descriptionStateFunc,
+				Type:     schema.TypeString,
+				Optional: true,
+				DiffSuppressFunc: func(_, old, new string, _ *schema.ResourceData) bool {
+					if (old == EmptyDescription && new == "") || (old == "" && new == EmptyDescription) {
+						return true
+					}
+					return false
+				},
+				StateFunc: func(v any) string {
+					s := v.(string)
+					if s == "" {
+						return EmptyDescription
+					}
+					return s
+				},
 			},
 			// global_replication_group_members cannot be correctly implemented because any secondary
 			// replication groups will be added after this resource completes.
@@ -197,21 +211,6 @@ func ResourceGlobalReplicationGroup() *schema.Resource {
 	}
 }
 
-func descriptionDiffSuppress(_, old, new string, _ *schema.ResourceData) bool {
-	if (old == EmptyDescription && new == "") || (old == "" && new == EmptyDescription) {
-		return true
-	}
-	return false
-}
-
-func descriptionStateFunc(v any) string {
-	s := v.(string)
-	if s == "" {
-		return EmptyDescription
-	}
-	return s
-}
-
 func customizeDiffGlobalReplicationGroupEngineVersionErrorOnDowngrade(_ context.Context, diff *schema.ResourceDiff, _ any) error {
 	if diff.Id() == "" || !diff.HasChange("engine_version") {
 		return nil
@@ -229,19 +228,13 @@ of the Global Replication Group and all Replication Group members. The AWS provi
 Please use the "-replace" option on the terraform plan and apply commands (see https://www.terraform.io/cli/commands/plan#replace-address).`, diff.Id())
 }
 
-type changeDiffer interface {
-	Id() string
-	GetChange(key string) (any, any)
-	HasChange(key string) bool
-}
-
 func customizeDiffGlobalReplicationGroupParamGroupNameRequiresMajorVersionUpgrade(_ context.Context, diff *schema.ResourceDiff, _ any) error {
 	return paramGroupNameRequiresMajorVersionUpgrade(diff)
 }
 
 // parameter_group_name can only be set when doing a major update,
 // but we also should allow it to stay set afterwards
-func paramGroupNameRequiresMajorVersionUpgrade(diff changeDiffer) error {
+func paramGroupNameRequiresMajorVersionUpgrade(diff sdkv2.ResourceDiffer) error {
 	o, n := diff.GetChange("parameter_group_name")
 	if o.(string) == n.(string) {
 		return nil
@@ -274,7 +267,6 @@ func paramGroupNameRequiresMajorVersionUpgrade(diff changeDiffer) error {
 
 func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	var diags diag.Diagnostics
-
 	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
 
 	id := d.Get("global_replication_group_id_suffix").(string)
@@ -288,27 +280,25 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc
 	}
 
 	output, err := conn.CreateGlobalReplicationGroupWithContext(ctx, input)
+
 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "creating ElastiCache Global Replication Group (%s): %s", id, err)
 	}
 
-	if output == nil || output.GlobalReplicationGroup == nil {
-		return sdkdiag.AppendErrorf(diags, "creating ElastiCache Global Replication Group (%s): empty result", id)
-	}
-
 	d.SetId(aws.StringValue(output.GlobalReplicationGroup.GlobalReplicationGroupId))
 
 	globalReplicationGroup, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate))
+
 	if err != nil {
-		return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Global Replication Group (%s) creation: %s", d.Id(), err)
+		return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Global Replication Group (%s) create: %s", d.Id(), err)
 	}
 
 	if v, ok := d.GetOk("automatic_failover_enabled"); ok {
 		if v := v.(bool); v == flattenGlobalReplicationGroupAutomaticFailoverEnabled(globalReplicationGroup.Members) {
 			log.Printf("[DEBUG] Not updating ElastiCache Global Replication Group (%s) automatic failover: no change from %t", d.Id(), v)
 		} else {
-			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(v), d.Timeout(schema.TimeoutCreate)); err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) automatic failover on creation: %s", d.Id(), err)
+			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(v), "automatic failover", d.Timeout(schema.TimeoutCreate)); err != nil {
+				return sdkdiag.AppendFromErr(diags, err)
 			}
 		}
 	}
@@ -317,8 +307,8 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc
 		if v.(string) == aws.StringValue(globalReplicationGroup.CacheNodeType) {
 			log.Printf("[DEBUG] Not updating ElastiCache Global Replication Group (%s) node type: no change from %q", d.Id(), v)
 		} else {
-			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(v.(string)), d.Timeout(schema.TimeoutCreate)); err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node type on creation: %s", d.Id(), err)
+			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(v.(string)), "node type", d.Timeout(schema.TimeoutCreate)); err != nil {
+				return sdkdiag.AppendFromErr(diags, err)
 			}
 		}
 	}
@@ -340,47 +330,35 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc
 		p := d.Get("parameter_group_name").(string)
 
 		if diff[0] == 1 {
-			err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(v.(string), p), d.Timeout(schema.TimeoutCreate))
-			if err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) engine version on creation: %s", d.Id(), err)
+			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(v.(string), p), "engine version (major)", d.Timeout(schema.TimeoutCreate)); err != nil {
+				return sdkdiag.AppendFromErr(diags, err)
 			}
 		} else if diff[1] == 1 {
 			if p != "" {
 				return sdkdiag.AppendErrorf(diags, "cannot change parameter group name on minor engine version upgrade, upgrading from %s to %s", engineVersion.String(), requestedVersion.String())
 			}
 			if t, _ := regexp.MatchString(`[6-9]\.x`, v.(string)); !t {
-				err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(v.(string)), d.Timeout(schema.TimeoutCreate))
-				if err != nil {
-					return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) engine version on creation: %s", d.Id(), err)
+				if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(v.(string)), "engine version (minor)", d.Timeout(schema.TimeoutCreate)); err != nil {
+					return sdkdiag.AppendFromErr(diags, err)
 				}
 			}
 		}
 	}
 
 	if v, ok := d.GetOk("num_node_groups"); ok {
-		current := len(globalReplicationGroup.GlobalNodeGroups)
-		requested := v.(int)
-
-		if requested != current {
-			if requested > current {
-				err := globalReplcationGroupNodeGroupIncrease(ctx, conn, d.Id(), requested)
-				if err != nil {
-					return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node groups on creation: %s", d.Id(), err)
-				}
-			} else if requested < current {
-				var ids []string
-				for _, v := range globalReplicationGroup.GlobalNodeGroups {
-					ids = append(ids, aws.StringValue(v.GlobalNodeGroupId))
+		if oldNodeGroupCount, newNodeGroupCount := len(globalReplicationGroup.GlobalNodeGroups), v.(int); newNodeGroupCount != oldNodeGroupCount {
+			if newNodeGroupCount > oldNodeGroupCount {
+				if err := increaseGlobalReplicationGroupNodeGroupCount(ctx, conn, d.Id(), newNodeGroupCount, d.Timeout(schema.TimeoutUpdate)); err != nil {
+					return sdkdiag.AppendFromErr(diags, err)
 				}
-				err := globalReplicationGroupNodeGroupDecrease(ctx, conn, d.Id(), requested, ids)
-				if err != nil {
-					return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node groups on creation: %s", d.Id(), err)
+			} else if newNodeGroupCount < oldNodeGroupCount {
+				ids := tfslices.ApplyToAll(globalReplicationGroup.GlobalNodeGroups, func(v *elasticache.GlobalNodeGroup) string {
+					return aws.StringValue(v.GlobalNodeGroupId)
+				})
+				if err := decreaseGlobalReplicationGroupNodeGroupCount(ctx, conn, d.Id(), newNodeGroupCount, ids, d.Timeout(schema.TimeoutUpdate)); err != nil {
+					return sdkdiag.AppendFromErr(diags, err)
 				}
 			}
-
-			if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node groups on creation: waiting for completion: %s", d.Id(), err)
-			}
 		}
 	}
 
@@ -389,21 +367,22 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc
 
 func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	var diags diag.Diagnostics
-
 	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
 
-	globalReplicationGroup, err := FindGlobalReplicationGroupByID(ctx, conn, d.Id())
+	globalReplicationGroup, err := findGlobalReplicationGroupByID(ctx, conn, d.Id())
+
 	if !d.IsNewResource() && tfresource.NotFound(err) {
 		log.Printf("[WARN] ElastiCache Global Replication Group (%s) not found, removing from state", d.Id())
 		d.SetId("")
 		return diags
 	}
+
 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): %s", d.Id(), err)
 	}
 
-	if !d.IsNewResource() && (aws.StringValue(globalReplicationGroup.Status) == "deleting" || aws.StringValue(globalReplicationGroup.Status) == "deleted") {
-		log.Printf("[WARN] ElastiCache Global Replication Group (%s) in deleted state (%s), removing from state", d.Id(), aws.StringValue(globalReplicationGroup.Status))
+	if status := aws.StringValue(globalReplicationGroup.Status); !d.IsNewResource() && (status == globalReplicationGroupStatusDeleting || status == globalReplicationGroupStatusDeleted) {
+		log.Printf("[WARN] ElastiCache Global Replication Group (%s) in deleted state (%s), removing from state", d.Id(), status)
 		d.SetId("")
 		return diags
 	}
@@ -433,23 +412,20 @@ func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceD
 	return diags
 }
 
-type globalReplicationGroupUpdater func(input *elasticache.ModifyGlobalReplicationGroupInput)
-
 func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	var diags diag.Diagnostics
-
 	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
 
 	// Only one field can be changed per request
 	if d.HasChange("cache_node_type") {
-		if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(d.Get("cache_node_type").(string)), d.Timeout(schema.TimeoutUpdate)); err != nil {
-			return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node type: %s", d.Id(), err)
+		if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(d.Get("cache_node_type").(string)), "node type", d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return sdkdiag.AppendFromErr(diags, err)
 		}
 	}
 
 	if d.HasChange("automatic_failover_enabled") {
-		if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(d.Get("automatic_failover_enabled").(bool)), d.Timeout(schema.TimeoutUpdate)); err != nil {
-			return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) automatic failover: %s", d.Id(), err)
+		if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(d.Get("automatic_failover_enabled").(bool)), "automatic failover", d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return sdkdiag.AppendFromErr(diags, err)
 		}
 	}
 
@@ -462,56 +438,60 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc
 		diff := diffVersion(newVersion, oldVersion)
 		if diff[0] == 1 {
 			p := d.Get("parameter_group_name").(string)
-			err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(n.(string), p), d.Timeout(schema.TimeoutUpdate))
-			if err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s): %s", d.Id(), err)
+			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(n.(string), p), "engine version (major)", d.Timeout(schema.TimeoutUpdate)); err != nil {
+				return sdkdiag.AppendFromErr(diags, err)
 			}
 		} else if diff[1] == 1 {
-			err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(n.(string)), d.Timeout(schema.TimeoutUpdate))
-			if err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s): %s", d.Id(), err)
+			if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(n.(string)), "engine version (minor)", d.Timeout(schema.TimeoutUpdate)); err != nil {
+				return sdkdiag.AppendFromErr(diags, err)
 			}
 		}
 	}
 
 	if d.HasChange("global_replication_group_description") {
-		if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupDescriptionUpdater(d.Get("global_replication_group_description").(string)), d.Timeout(schema.TimeoutUpdate)); err != nil {
-			return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) description: %s", d.Id(), err)
+		if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupDescriptionUpdater(d.Get("global_replication_group_description").(string)), "description", d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return sdkdiag.AppendFromErr(diags, err)
 		}
 	}
 
 	if d.HasChange("num_node_groups") {
 		o, n := d.GetChange("num_node_groups")
-		current := o.(int)
-		requested := n.(int)
-
-		if requested != current {
-			if requested > current {
-				err := globalReplcationGroupNodeGroupIncrease(ctx, conn, d.Id(), requested)
-				if err != nil {
-					return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node groups: %s", d.Id(), err)
-				}
-			} else if requested < current {
-				var ids []string
-				for _, v := range d.Get("global_node_groups").(*schema.Set).List() {
-					v := v.(map[string]any)
-					ids = append(ids, v["global_node_group_id"].(string))
+		oldNodeGroupCount, newNodeGroupCount := o.(int), n.(int)
+
+		if newNodeGroupCount != oldNodeGroupCount {
+			if newNodeGroupCount > oldNodeGroupCount {
+				if err := increaseGlobalReplicationGroupNodeGroupCount(ctx, conn, d.Id(), newNodeGroupCount, d.Timeout(schema.TimeoutUpdate)); err != nil {
+					return sdkdiag.AppendFromErr(diags, err)
 				}
-				err := globalReplicationGroupNodeGroupDecrease(ctx, conn, d.Id(), requested, ids)
-				if err != nil {
-					return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node groups: %s", d.Id(), err)
+			} else if newNodeGroupCount < oldNodeGroupCount {
+				ids := tfslices.ApplyToAll(d.Get("global_node_groups").(*schema.Set).List(), func(tfMapRaw interface{}) string {
+					tfMap := tfMapRaw.(map[string]interface{})
+					return tfMap["global_node_group_id"].(string)
+				})
+				if err := decreaseGlobalReplicationGroupNodeGroupCount(ctx, conn, d.Id(), newNodeGroupCount, ids, d.Timeout(schema.TimeoutUpdate)); err != nil {
+					return sdkdiag.AppendFromErr(diags, err)
 				}
 			}
-
-			if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {
-				return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) node groups: waiting for completion: %s", d.Id(), err)
-			}
 		}
 	}
 
 	return append(diags, resourceGlobalReplicationGroupRead(ctx, d, meta)...)
 }
 
+func resourceGlobalReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
+	var diags diag.Diagnostics
+	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
+
+	// Using Update timeout because the Global Replication Group could be in the middle of an update operation.
+	if err := deleteGlobalReplicationGroup(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), d.Timeout(schema.TimeoutDelete)); err != nil {
+		return sdkdiag.AppendFromErr(diags, err)
+	}
+
+	return diags
+}
+
+type globalReplicationGroupUpdater func(input *elasticache.ModifyGlobalReplicationGroupInput)
+
 func globalReplicationGroupDescriptionUpdater(description string) globalReplicationGroupUpdater {
 	return func(input *elasticache.ModifyGlobalReplicationGroupInput) {
 		input.GlobalReplicationGroupDescription = aws.String(description)
@@ -543,7 +523,7 @@ func globalReplicationGroupNodeTypeUpdater(nodeType string) globalReplicationGro
 	}
 }
 
-func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, f globalReplicationGroupUpdater, timeout time.Duration) error {
+func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, f globalReplicationGroupUpdater, propertyName string, timeout time.Duration) error {
 	input := &elasticache.ModifyGlobalReplicationGroupInput{
 		ApplyImmediately:         aws.Bool(true),
 		GlobalReplicationGroupId: aws.String(id),
@@ -551,28 +531,66 @@ func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC
 	f(input)
 
 	if _, err := conn.ModifyGlobalReplicationGroupWithContext(ctx, input); err != nil {
-		return err
+		return fmt.Errorf("updating ElastiCache Global Replication Group (%s) %s: %w", id, propertyName, err)
 	}
 
 	if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, id, timeout); err != nil {
-		return fmt.Errorf("waiting for completion: %w", err)
+		return fmt.Errorf("waiting for ElastiCache Global Replication Group (%s) update: %w", id, err)
 	}
 
 	return nil
 }
 
-func resourceGlobalReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	var diags diag.Diagnostics
+func increaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *elasticache.ElastiCache, id string, newNodeGroupCount int, timeout time.Duration) error {
+	input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{
+		ApplyImmediately:         aws.Bool(true),
+		GlobalReplicationGroupId: aws.String(id),
+		NodeGroupCount:           aws.Int64(int64(newNodeGroupCount)),
+	}
 
-	conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx)
+	_, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input)
 
-	// Using Update timeout because the Global Replication Group could be in the middle of an update operation
-	err := deleteGlobalReplicationGroup(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), d.Timeout(schema.TimeoutDelete))
 	if err != nil {
-		return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Global Replication Group: %s", err)
+		return fmt.Errorf("increasing ElastiCache Global Replication Group (%s) node group count (%d): %w", id, newNodeGroupCount, err)
 	}
 
-	return diags
+	if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, id, timeout); err != nil {
+		return fmt.Errorf("waiting for ElastiCache Global Replication Group (%s) update: %w", id, err)
+	}
+
+	return nil
+}
+
+func decreaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *elasticache.ElastiCache, id string, newNodeGroupCount int, nodeGroupIDs []string, timeout time.Duration) error {
+	slices.SortFunc(nodeGroupIDs, func(a, b string) int {
+		if globalReplicationGroupNodeNumber(a) < globalReplicationGroupNodeNumber(b) {
+			return -1
+		}
+		if globalReplicationGroupNodeNumber(a) > globalReplicationGroupNodeNumber(b) {
+			return 1
+		}
+		return 0
+	})
+	nodeGroupIDs = nodeGroupIDs[:newNodeGroupCount]
+
+	input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{
+		ApplyImmediately:         aws.Bool(true),
+		GlobalNodeGroupsToRetain: aws.StringSlice(nodeGroupIDs),
+		GlobalReplicationGroupId: aws.String(id),
+		NodeGroupCount:           aws.Int64(int64(newNodeGroupCount)),
+	}
+
+	_, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input)
+
+	if err != nil {
+		return fmt.Errorf("decreasing ElastiCache Global Replication Group (%s) node group count (%d): %w", id, newNodeGroupCount, err)
+	}
+
+	if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, id, timeout); err != nil {
+		return fmt.Errorf("waiting for ElastiCache Global Replication Group (%s) update: %w", id, err)
+	}
+
+	return nil
 }
 
 func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, readyTimeout, deleteTimeout time.Duration) error {
@@ -581,38 +599,208 @@ func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC
 		RetainPrimaryReplicationGroup: aws.Bool(true),
 	}
 
-	err := retry.RetryContext(ctx, readyTimeout, func() *retry.RetryError {
-		_, err := conn.DeleteGlobalReplicationGroupWithContext(ctx, input)
-		if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) {
-			return retry.NonRetryableError(&retry.NotFoundError{
-				LastError:   err,
-				LastRequest: input,
-			})
+	_, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, readyTimeout, func() (interface{}, error) {
+		return conn.DeleteGlobalReplicationGroupWithContext(ctx, input)
+	}, elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault)
+
+	if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) {
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("deleting ElastiCache Global Replication Group 
(%s): %w", id, err) + } + + if _, err := waitGlobalReplicationGroupDeleted(ctx, conn, id, deleteTimeout); err != nil { + return fmt.Errorf("waiting for ElastiCache Global Replication Group (%s) delete: %w", id, err) + } + + return nil +} + +func findGlobalReplicationGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.GlobalReplicationGroup, error) { + input := &elasticache.DescribeGlobalReplicationGroupsInput{ + GlobalReplicationGroupId: aws.String(id), + ShowMemberInfo: aws.Bool(true), + } + + return findGlobalReplicationGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.GlobalReplicationGroup]()) +} + +func findGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeGlobalReplicationGroupsInput, filter tfslices.Predicate[*elasticache.GlobalReplicationGroup]) (*elasticache.GlobalReplicationGroup, error) { + output, err := findGlobalReplicationGroups(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findGlobalReplicationGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeGlobalReplicationGroupsInput, filter tfslices.Predicate[*elasticache.GlobalReplicationGroup]) ([]*elasticache.GlobalReplicationGroup, error) { + var output []*elasticache.GlobalReplicationGroup + + err := conn.DescribeGlobalReplicationGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeGlobalReplicationGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.GlobalReplicationGroups { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault) { - return retry.RetryableError(err) + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findGlobalReplicationGroupByID(ctx, conn, globalReplicationGroupID) + + if tfresource.NotFound(err) { + return nil, "", nil } + if err != nil { - return retry.NonRetryableError(err) + return nil, "", err } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteGlobalReplicationGroupWithContext(ctx, input) + return output, aws.StringValue(output.Status), nil } - if tfresource.NotFound(err) { - return nil +} + +const ( + globalReplicationGroupDefaultCreatedTimeout = 60 * time.Minute + globalReplicationGroupDefaultUpdatedTimeout = 60 * time.Minute + globalReplicationGroupDefaultDeletedTimeout = 20 * time.Minute +) + +const ( + globalReplicationGroupStatusAvailable = "available" + globalReplicationGroupStatusCreating = "creating" + globalReplicationGroupStatusDeleted = "deleted" + globalReplicationGroupStatusDeleting = "deleting" + globalReplicationGroupStatusModifying = "modifying" + globalReplicationGroupStatusPrimaryOnly = "primary-only" +) + +func waitGlobalReplicationGroupAvailable(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: 
[]string{globalReplicationGroupStatusCreating, globalReplicationGroupStatusModifying}, + Target: []string{globalReplicationGroupStatusAvailable, globalReplicationGroupStatusPrimaryOnly}, + Refresh: statusGlobalReplicationGroup(ctx, conn, globalReplicationGroupID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*elasticache.GlobalReplicationGroup); ok { + return output, err } + + return nil, err +} + +func waitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + globalReplicationGroupStatusAvailable, + globalReplicationGroupStatusPrimaryOnly, + globalReplicationGroupStatusModifying, + globalReplicationGroupStatusDeleting, + }, + Target: []string{}, + Refresh: statusGlobalReplicationGroup(ctx, conn, globalReplicationGroupID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*elasticache.GlobalReplicationGroup); ok { + return output, err + } + + return nil, err +} + +func findGlobalReplicationGroupMemberByID(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID string) (*elasticache.GlobalReplicationGroupMember, error) { + globalReplicationGroup, err := findGlobalReplicationGroupByID(ctx, conn, globalReplicationGroupID) + if err != nil { - return err + return nil, err } - if _, err := waitGlobalReplicationGroupDeleted(ctx, conn, id, deleteTimeout); err != nil { - return fmt.Errorf("waiting for completion: %w", err) + if len(globalReplicationGroup.Members) == 0 { + return nil, tfresource.NewEmptyResultError(nil) } - return nil + for _, v := range globalReplicationGroup.Members { + if aws.StringValue(v.ReplicationGroupId) == replicationGroupID { + return v, nil + } + } + + return nil, &retry.NotFoundError{ + Message: fmt.Sprintf("Replication Group (%s) not found in Global Replication Group (%s)", replicationGroupID, globalReplicationGroupID), + } +} + +func statusGlobalReplicationGroupMember(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findGlobalReplicationGroupMemberByID(ctx, conn, globalReplicationGroupID, replicationGroupID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } +} + +const ( + globalReplicationGroupMemberStatusAssociated = "associated" +) + +func waitGlobalReplicationGroupMemberDetached(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroupMember, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{globalReplicationGroupMemberStatusAssociated}, + Target: []string{}, + Refresh: statusGlobalReplicationGroupMember(ctx, conn, globalReplicationGroupID, replicationGroupID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*elasticache.GlobalReplicationGroupMember); ok { + return output, err + } + + return 
nil, err } func flattenGlobalReplicationGroupAutomaticFailoverEnabled(members []*elasticache.GlobalReplicationGroupMember) bool { @@ -662,45 +850,13 @@ func flattenGlobalNodeGroup(nodeGroup *elasticache.GlobalNodeGroup) map[string]a func flattenGlobalReplicationGroupPrimaryGroupID(members []*elasticache.GlobalReplicationGroupMember) string { for _, member := range members { - if aws.StringValue(member.Role) == GlobalReplicationGroupMemberRolePrimary { + if aws.StringValue(member.Role) == globalReplicationGroupMemberRolePrimary { return aws.StringValue(member.ReplicationGroupId) } } return "" } -func globalReplcationGroupNodeGroupIncrease(ctx context.Context, conn *elasticache.ElastiCache, id string, requested int) error { - input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - GlobalReplicationGroupId: aws.String(id), - NodeGroupCount: aws.Int64(int64(requested)), - } - _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) - return err -} - -func globalReplicationGroupNodeGroupDecrease(ctx context.Context, conn *elasticache.ElastiCache, id string, requested int, nodeGroupIDs []string) error { - slices.SortFunc(nodeGroupIDs, func(a, b string) int { - if globalReplicationGroupNodeNumber(a) < globalReplicationGroupNodeNumber(b) { - return -1 - } - if globalReplicationGroupNodeNumber(a) > globalReplicationGroupNodeNumber(b) { - return 1 - } - return 0 - }) - nodeGroupIDs = nodeGroupIDs[:requested] - - input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - GlobalReplicationGroupId: aws.String(id), - NodeGroupCount: aws.Int64(int64(requested)), - GlobalNodeGroupsToRetain: aws.StringSlice(nodeGroupIDs), - } - _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) - return err -} - func globalReplicationGroupNodeNumber(id string) int { re := regexache.MustCompile(`^.+-0{0,3}(\d+)$`) matches := re.FindStringSubmatch(id) diff --git a/internal/service/elasticache/global_replication_group_test.go b/internal/service/elasticache/global_replication_group_test.go index 58bd08955ea..cd45e2a845a 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -31,7 +31,6 @@ func TestAccElastiCacheGlobalReplicationGroup_basic(t *testing.T) { var globalReplicationGroup elasticache.GlobalReplicationGroup var primaryReplicationGroup elasticache.ReplicationGroup - var pg elasticache.CacheParameterGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -45,7 +44,6 @@ func TestAccElastiCacheGlobalReplicationGroup_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupDestroy(ctx), - testAccCheckGlobalReplicationGroupMemberParameterGroupDestroy(ctx, &pg), ), Steps: []resource.TestStep{ { @@ -53,7 +51,6 @@ func TestAccElastiCacheGlobalReplicationGroup_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckGlobalReplicationGroupExists(ctx, resourceName, &globalReplicationGroup), testAccCheckReplicationGroupExists(ctx, primaryReplicationGroupResourceName, &primaryReplicationGroup), - testAccCheckReplicationGroupParameterGroup(ctx, &primaryReplicationGroup, &pg), acctest.MatchResourceAttrGlobalARN(resourceName, "arn", "elasticache", 
regexache.MustCompile(`globalreplicationgroup:`+tfelasticache.GlobalReplicationGroupRegionPrefixFormat+rName)), resource.TestCheckResourceAttrPair(resourceName, "at_rest_encryption_enabled", primaryReplicationGroupResourceName, "at_rest_encryption_enabled"), resource.TestCheckResourceAttr(resourceName, "auth_token_enabled", "false"), @@ -1457,7 +1454,7 @@ func testAccCheckGlobalReplicationGroupExists(ctx context.Context, resourceName return fmt.Errorf("retrieving ElastiCache Global Replication Group (%s): %w", rs.Primary.ID, err) } - if aws.StringValue(grg.Status) == tfelasticache.GlobalReplicationGroupStatusDeleting || aws.StringValue(grg.Status) == tfelasticache.GlobalReplicationGroupStatusDeleted { + if aws.StringValue(grg.Status) == "deleting" || aws.StringValue(grg.Status) == "deleted" { return fmt.Errorf("ElastiCache Global Replication Group (%s) exists, but is in a non-available state: %s", rs.Primary.ID, aws.StringValue(grg.Status)) } diff --git a/internal/service/elasticache/parameter_group.go b/internal/service/elasticache/parameter_group.go index 9fe188ece78..5db5edf801d 100644 --- a/internal/service/elasticache/parameter_group.go +++ b/internal/service/elasticache/parameter_group.go @@ -4,7 +4,6 @@ package elasticache import ( - "bytes" "context" "fmt" "log" @@ -18,9 +17,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -29,7 +29,7 @@ import ( // @SDKResource("aws_elasticache_parameter_group", name="Parameter Group") // @Tags(identifierAttribute="arn") -func ResourceParameterGroup() *schema.Resource { +func resourceParameterGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceParameterGroupCreate, ReadWithoutTimeout: resourceParameterGroupRead, @@ -79,7 +79,7 @@ func ResourceParameterGroup() *schema.Resource { }, }, }, - Set: ParameterHash, + Set: parameterHash, }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -124,7 +124,7 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - parameterGroup, err := FindParameterGroupByName(ctx, conn, d.Id()) + parameterGroup, err := findCacheParameterGroupByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ElastiCache Parameter Group (%s) not found, removing from state", d.Id()) @@ -153,7 +153,7 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading ElastiCache Parameter Group (%s) parameters: %s", d.Id(), err) } - d.Set("parameter", FlattenParameters(output.Parameters)) + d.Set("parameter", flattenParameters(output.Parameters)) return diags } @@ -164,10 +164,7 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m if d.HasChange("parameter") { o, n := 
d.GetChange("parameter") - toRemove, toAdd := ParameterChanges(o, n) - - log.Printf("[DEBUG] Parameters to remove: %#v", toRemove) - log.Printf("[DEBUG] Parameters to add or update: %#v", toAdd) + toRemove, toAdd := parameterChanges(o, n) // We can only modify 20 parameters at a time, so walk them until // we've got them all. @@ -284,57 +281,46 @@ func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - err := deleteParameterGroup(ctx, conn, d.Id()) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { - return diags - } - if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Parameter Group (%s): %s", d.Id(), err) + log.Printf("[INFO] Deleting ElastiCache Parameter Group: %s", d.Id()) + if err := deleteParameterGroup(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendFromErr(diags, err) } + return diags } func deleteParameterGroup(ctx context.Context, conn *elasticache.ElastiCache, name string) error { - deleteOpts := elasticache.DeleteCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(name), - } - err := retry.RetryContext(ctx, 3*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteCacheParameterGroupWithContext(ctx, &deleteOpts) - if err != nil { - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { - return nil - } - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeInvalidCacheParameterGroupStateFault) { - return retry.RetryableError(err) - } - return retry.NonRetryableError(err) - } + const ( + timeout = 3 * time.Minute + ) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { + return conn.DeleteCacheParameterGroupWithContext(ctx, &elasticache.DeleteCacheParameterGroupInput{ + CacheParameterGroupName: aws.String(name), + }) + }, elasticache.ErrCodeInvalidCacheParameterGroupStateFault) + + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteCacheParameterGroupWithContext(ctx, &deleteOpts) + } + + if err != nil { + return fmt.Errorf("deleting ElastiCache Parameter Group (%s): %s", name, err) } return err } -func ParameterHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) - buf.WriteString(fmt.Sprintf("%s-", m["value"].(string))) - - return create.StringHashcode(buf.String()) -} +var ( + parameterHash = sdkv2.SimpleSchemaSetFunc("name", "value") +) -func ParameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.ParameterNameValue) { +func parameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.ParameterNameValue) { if o == nil { o = new(schema.Set) } if n == nil { n = new(schema.Set) } - os := o.(*schema.Set) ns := n.(*schema.Set) @@ -395,23 +381,73 @@ func resourceModifyParameterGroup(ctx context.Context, conn *elasticache.ElastiC return err } -// Flattens an array of Parameters into a []map[string]interface{} -func FlattenParameters(list []*elasticache.Parameter) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - if i.ParameterValue != nil { - result = append(result, map[string]interface{}{ - "name": strings.ToLower(aws.StringValue(i.ParameterName)), - "value": aws.StringValue(i.ParameterValue), - }) +func findCacheParameterGroupByName(ctx 
context.Context, conn *elasticache.ElastiCache, name string) (*elasticache.CacheParameterGroup, error) { + input := &elasticache.DescribeCacheParameterGroupsInput{ + CacheParameterGroupName: aws.String(name), + } + + return findCacheParameterGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.CacheParameterGroup]()) +} + +func findCacheParameterGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheParameterGroupsInput, filter tfslices.Predicate[*elasticache.CacheParameterGroup]) (*elasticache.CacheParameterGroup, error) { + output, err := findCacheParameterGroups(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findCacheParameterGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheParameterGroupsInput, filter tfslices.Predicate[*elasticache.CacheParameterGroup]) ([]*elasticache.CacheParameterGroup, error) { + var output []*elasticache.CacheParameterGroup + + err := conn.DescribeCacheParameterGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.CacheParameterGroups { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return result + + if err != nil { + return nil, err + } + + return output, nil } -func expandParameter(param map[string]interface{}) *elasticache.ParameterNameValue { +func expandParameter(tfMap map[string]interface{}) *elasticache.ParameterNameValue { return &elasticache.ParameterNameValue{ - ParameterName: aws.String(param["name"].(string)), - ParameterValue: aws.String(param["value"].(string)), + ParameterName: aws.String(tfMap["name"].(string)), + ParameterValue: aws.String(tfMap["value"].(string)), } } + +func flattenParameters(apiObjects []*elasticache.Parameter) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + if apiObject.ParameterValue != nil { + tfList = append(tfList, map[string]interface{}{ + "name": strings.ToLower(aws.StringValue(apiObject.ParameterName)), + "value": aws.StringValue(apiObject.ParameterValue), + }) + } + } + + return tfList +} diff --git a/internal/service/elasticache/parameter_group_test.go b/internal/service/elasticache/parameter_group_test.go index 81c2ca4ed12..b2e40f8b0db 100644 --- a/internal/service/elasticache/parameter_group_test.go +++ b/internal/service/elasticache/parameter_group_test.go @@ -455,7 +455,7 @@ func testAccCheckParameterGroupDestroy(ctx context.Context) resource.TestCheckFu continue } - _, err := tfelasticache.FindParameterGroupByName(ctx, conn, rs.Primary.ID) + _, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -485,7 +485,7 @@ func testAccCheckParameterGroupExists(ctx context.Context, n string, v *elastica conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - output, err := tfelasticache.FindParameterGroupByName(ctx, conn, rs.Primary.ID) + output, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, rs.Primary.ID) if err != nil { return err @@ -590,37 +590,6 @@ resource "aws_elasticache_parameter_group" "test" { `, family, rName, tagName1, tagValue1, tagName2, 
tagValue2) } -func TestFlattenParameters(t *testing.T) { - t.Parallel() - - cases := []struct { - Input []*elasticache.Parameter - Output []map[string]interface{} - }{ - { - Input: []*elasticache.Parameter{ - { - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - }, - }, - Output: []map[string]interface{}{ - { - "name": "activerehashing", - "value": "yes", - }, - }, - }, - } - - for _, tc := range cases { - output := tfelasticache.FlattenParameters(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - func TestParameterChanges(t *testing.T) { t.Parallel() diff --git a/internal/service/elasticache/replication_group.go b/internal/service/elasticache/replication_group.go index 0f5366a710e..430db5fb0e5 100644 --- a/internal/service/elasticache/replication_group.go +++ b/internal/service/elasticache/replication_group.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "log" + "slices" "strings" "time" @@ -26,6 +27,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" "github.com/hashicorp/terraform-provider-aws/internal/semver" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -34,7 +36,7 @@ import ( // @SDKResource("aws_elasticache_replication_group", name="Replication Group") // @Tags(identifierAttribute="arn") -func ResourceReplicationGroup() *schema.Resource { +func resourceReplicationGroup() *schema.Resource { //lintignore:R011 return &schema.Resource{ CreateWithoutTimeout: resourceReplicationGroupCreate, @@ -123,6 +125,10 @@ func ResourceReplicationGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "final_snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + }, "global_replication_group_id": { Type: schema.TypeString, Optional: true, @@ -148,6 +154,11 @@ func ResourceReplicationGroup() *schema.Resource { Computed: true, ValidateFunc: validation.StringInSlice(elasticache.IpDiscovery_Values(), false), }, + "kms_key_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, "log_delivery_configuration": { Type: schema.TypeSet, Optional: true, @@ -190,7 +201,6 @@ func ResourceReplicationGroup() *schema.Resource { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "multi_az_enabled": { Type: schema.TypeBool, @@ -279,14 +289,12 @@ func ResourceReplicationGroup() *schema.Resource { Computed: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "security_group_ids": { Type: schema.TypeSet, Optional: true, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, "snapshot_arns": { Type: schema.TypeSet, @@ -300,7 +308,6 @@ func ResourceReplicationGroup() *schema.Resource { validation.StringDoesNotContainAny(","), ), }, - Set: schema.HashString, }, "snapshot_retention_limit": { Type: schema.TypeInt, @@ -341,18 +348,8 @@ func ResourceReplicationGroup() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, ConflictsWith: []string{"auth_token"}, }, - "kms_key_id": { - Type: schema.TypeString, - ForceNew: true, - 
Optional: true, - }, - "final_snapshot_identifier": { - Type: schema.TypeString, - Optional: true, - }, }, SchemaVersion: 2, @@ -376,13 +373,13 @@ func ResourceReplicationGroup() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(ReplicationGroupDefaultCreatedTimeout), - Delete: schema.DefaultTimeout(ReplicationGroupDefaultDeletedTimeout), - Update: schema.DefaultTimeout(ReplicationGroupDefaultUpdatedTimeout), + Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(40 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), }, CustomizeDiff: customdiff.Sequence( - CustomizeDiffValidateReplicationGroupAutomaticFailover, + customizeDiffValidateReplicationGroupAutomaticFailover, customizeDiffEngineVersionForceNewOnDowngrade, customdiff.ComputedIf("member_clusters", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { return diff.HasChange("num_cache_clusters") || @@ -409,14 +406,32 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, Tags: getTagsIn(ctx), } - if v, ok := d.GetOk("description"); ok { - input.ReplicationGroupDescription = aws.String(v.(string)) + if _, ok := d.GetOk("at_rest_encryption_enabled"); ok { + input.AtRestEncryptionEnabled = aws.Bool(d.Get("at_rest_encryption_enabled").(bool)) + } + + if v, ok := d.GetOk("auth_token"); ok { + input.AuthToken = aws.String(v.(string)) + } + + if v, ok := d.GetOk("auto_minor_version_upgrade"); ok { + if v, null, _ := nullable.Bool(v.(string)).ValueBool(); !null { + input.AutoMinorVersionUpgrade = aws.Bool(v) + } } if v, ok := d.GetOk("data_tiering_enabled"); ok { input.DataTieringEnabled = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("description"); ok { + input.ReplicationGroupDescription = aws.String(v.(string)) + } + + if v, ok := d.GetOk("engine_version"); ok { + input.EngineVersion = aws.String(v.(string)) + } + if v, ok := d.GetOk("global_replication_group_id"); ok { input.GlobalReplicationGroupId = aws.String(v.(string)) } else { @@ -430,119 +445,104 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, input.Engine = aws.String(d.Get("engine").(string)) } - if v, ok := d.GetOk("engine_version"); ok { - input.EngineVersion = aws.String(v.(string)) + if v, ok := d.GetOk("ip_discovery"); ok { + input.IpDiscovery = aws.String(v.(string)) } - if v, ok := d.GetOk("auto_minor_version_upgrade"); ok { - if v, null, _ := nullable.Bool(v.(string)).ValueBool(); !null { - input.AutoMinorVersionUpgrade = aws.Bool(v) - } + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) } - if preferredAZs, ok := d.GetOk("preferred_cache_cluster_azs"); ok { - input.PreferredCacheClusterAZs = flex.ExpandStringList(preferredAZs.([]interface{})) + if v, ok := d.GetOk("log_delivery_configuration"); ok && v.(*schema.Set).Len() > 0 { + for _, tfMapRaw := range v.(*schema.Set).List() { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + apiObject := expandLogDeliveryConfigurations(tfMap) + input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &apiObject) + } } - if v, ok := d.GetOk("parameter_group_name"); ok { - input.CacheParameterGroupName = aws.String(v.(string)) + if v, ok := d.GetOk("maintenance_window"); ok { + input.PreferredMaintenanceWindow = aws.String(v.(string)) } - if v, ok := d.GetOk("ip_discovery"); ok { - input.IpDiscovery = aws.String(v.(string)) + if v, ok := d.GetOk("multi_az_enabled"); ok { + 
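+ // Note (descriptive comment, not part of the upstream change): d.GetOk reports ok == false for a false boolean, so multi_az_enabled is only forwarded to the API when it is true; an explicit false falls through to the API default.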
input.MultiAZEnabled = aws.Bool(v.(bool)) } if v, ok := d.GetOk("network_type"); ok { input.NetworkType = aws.String(v.(string)) } - if v, ok := d.GetOk("port"); ok { - input.Port = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("subnet_group_name"); ok { - input.CacheSubnetGroupName = aws.String(v.(string)) + if v, ok := d.GetOk("notification_topic_arn"); ok { + input.NotificationTopicArn = aws.String(v.(string)) } - if SGNames := d.Get("security_group_names").(*schema.Set); SGNames.Len() > 0 { - input.CacheSecurityGroupNames = flex.ExpandStringSet(SGNames) + if v, ok := d.GetOk("num_cache_clusters"); ok { + input.NumCacheClusters = aws.Int64(int64(v.(int))) } - if SGIds := d.Get("security_group_ids").(*schema.Set); SGIds.Len() > 0 { - input.SecurityGroupIds = flex.ExpandStringSet(SGIds) + if v, ok := d.GetOk("num_node_groups"); ok && v != 0 { + input.NumNodeGroups = aws.Int64(int64(v.(int))) } - if snaps := d.Get("snapshot_arns").(*schema.Set); snaps.Len() > 0 { - input.SnapshotArns = flex.ExpandStringSet(snaps) + if v, ok := d.GetOk("parameter_group_name"); ok { + input.CacheParameterGroupName = aws.String(v.(string)) } - if v, ok := d.GetOk("log_delivery_configuration"); ok { - input.LogDeliveryConfigurations = []*elasticache.LogDeliveryConfigurationRequest{} - v := v.(*schema.Set).List() - for _, v := range v { - logDeliveryConfigurationRequest := expandLogDeliveryConfigurations(v.(map[string]interface{})) - input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) - } + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("maintenance_window"); ok { - input.PreferredMaintenanceWindow = aws.String(v.(string)) + if v, ok := d.GetOk("preferred_cache_cluster_azs"); ok && len(v.([]interface{})) > 0 { + input.PreferredCacheClusterAZs = flex.ExpandStringList(v.([]interface{})) } - if _, ok := d.GetOk("multi_az_enabled"); ok { - input.MultiAZEnabled = aws.Bool(d.Get("multi_az_enabled").(bool)) + if v, ok := d.GetOk("replicas_per_node_group"); ok { + input.ReplicasPerNodeGroup = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("notification_topic_arn"); ok { - input.NotificationTopicArn = aws.String(v.(string)) + if v, ok := d.GetOk("subnet_group_name"); ok { + input.CacheSubnetGroupName = aws.String(v.(string)) } - if v, ok := d.GetOk("kms_key_id"); ok { - input.KmsKeyId = aws.String(v.(string)) + if v, ok := d.GetOk("security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - if v, ok := d.GetOk("snapshot_retention_limit"); ok { - input.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("security_group_names"); ok && v.(*schema.Set).Len() > 0 { + input.CacheSecurityGroupNames = flex.ExpandStringSet(v.(*schema.Set)) } - if v, ok := d.GetOk("snapshot_window"); ok { - input.SnapshotWindow = aws.String(v.(string)) + if v, ok := d.GetOk("snapshot_arns"); ok && v.(*schema.Set).Len() > 0 { + input.SnapshotArns = flex.ExpandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("snapshot_name"); ok { input.SnapshotName = aws.String(v.(string)) } - if _, ok := d.GetOk("transit_encryption_enabled"); ok { - input.TransitEncryptionEnabled = aws.Bool(d.Get("transit_encryption_enabled").(bool)) - } - - if v, ok := d.GetOk("transit_encryption_mode"); ok { - input.TransitEncryptionMode = aws.String(v.(string)) - } - - if _, ok := d.GetOk("at_rest_encryption_enabled"); ok { - input.AtRestEncryptionEnabled = 
aws.Bool(d.Get("at_rest_encryption_enabled").(bool)) - } - - if v, ok := d.GetOk("auth_token"); ok { - input.AuthToken = aws.String(v.(string)) + if v, ok := d.GetOk("snapshot_retention_limit"); ok { + input.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("num_node_groups"); ok && v != 0 { - input.NumNodeGroups = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("snapshot_window"); ok { + input.SnapshotWindow = aws.String(v.(string)) } - if v, ok := d.GetOk("replicas_per_node_group"); ok { - input.ReplicasPerNodeGroup = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("transit_encryption_enabled"); ok { + input.TransitEncryptionEnabled = aws.Bool(v.(bool)) } - if numCacheClusters, ok := d.GetOk("num_cache_clusters"); ok { - input.NumCacheClusters = aws.Int64(int64(numCacheClusters.(int))) + if v, ok := d.GetOk("transit_encryption_mode"); ok { + input.TransitEncryptionMode = aws.String(v.(string)) } - if userGroupIds := d.Get("user_group_ids").(*schema.Set); userGroupIds.Len() > 0 { - input.UserGroupIds = flex.ExpandStringSet(userGroupIds) + if v, ok := d.GetOk("user_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.UserGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } output, err := conn.CreateReplicationGroupWithContext(ctx, input) @@ -560,7 +560,10 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, d.SetId(aws.StringValue(output.ReplicationGroup.ReplicationGroupId)) - if _, err := WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate), replicationGroupAvailableCreateDelay); err != nil { + const ( + delay = 30 * time.Second + ) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate), delay); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) create: %s", d.Id(), err) } @@ -570,7 +573,7 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, // to be fully added to the global replication group. // API calls to the global replication group can be made in any region. 
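// Descriptive note: per the waiter defined in global_replication_group.go above, this polls DescribeGlobalReplicationGroups until the status leaves "creating"/"modifying" for "available" or "primary-only".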
if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, v.(string), globalReplicationGroupDefaultCreatedTimeout); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Global Replication Group (%s) to be available: %s", v, err) + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Global Replication Group (%s) available: %s", v, err) } } @@ -595,17 +598,19 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - rgp, err := FindReplicationGroupByID(ctx, conn, d.Id()) + rgp, err := findReplicationGroupByID(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ElastiCache Replication Group (%s) not found, removing from state", d.Id()) d.SetId("") return diags } + if err != nil { return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): %s", d.Id(), err) } - if aws.StringValue(rgp.Status) == ReplicationGroupStatusDeleting { + if aws.StringValue(rgp.Status) == replicationGroupStatusDeleting { log.Printf("[WARN] ElastiCache Replication Group (%s) is currently in the `deleting` status, removing from state", d.Id()) d.SetId("") return diags @@ -681,7 +686,10 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m // Tags cannot be read when the replication group is not Available log.Printf("[DEBUG] Waiting for ElastiCache Replication Group (%s) to become available", d.Id()) - _, err = WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableReadDelay) + const ( + delay = 0 * time.Second + ) + _, err = waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group to be available (%s): %s", aws.StringValue(rgp.ARN), err) } @@ -727,19 +735,19 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) if d.HasChangesExcept("tags", "tags_all") { - if d.HasChanges( - "num_node_groups", - "replicas_per_node_group", - ) { - err := modifyReplicationGroupShardConfiguration(ctx, conn, d) - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s) shard configuration: %s", d.Id(), err) + o, n := d.GetChange("num_cache_clusters") + oldCacheClusterCount, newCacheClusterCount := o.(int), n.(int) + + if d.HasChanges("num_node_groups", "replicas_per_node_group") { + if err := modifyReplicationGroupShardConfiguration(ctx, conn, d); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } else if d.HasChange("num_cache_clusters") { - err := modifyReplicationGroupNumCacheClusters(ctx, conn, d, "num_cache_clusters") - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s) clusters: %s", d.Id(), err) - } + if newCacheClusterCount > oldCacheClusterCount { + if err := increaseReplicationGroupReplicaCount(ctx, conn, d.Id(), newCacheClusterCount, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } // Else defer until after all other modifications are made. 
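+ // The deferred decrease runs at the end of Update, after ModifyReplicationGroup and any auth token change, so replicas slated for removal are not modified first; see the num_cache_clusters block below.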
} requestUpdate := false @@ -748,19 +756,13 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, ReplicationGroupId: aws.String(d.Id()), } - if d.HasChange("description") { - input.ReplicationGroupDescription = aws.String(d.Get("description").(string)) - requestUpdate = true - } - - if d.HasChange("ip_discovery") { - input.IpDiscovery = aws.String(d.Get("ip_discovery").(string)) - requestUpdate = true - } - - if d.HasChange("network_type") { - input.IpDiscovery = aws.String(d.Get("network_type").(string)) - requestUpdate = true + if d.HasChange("auto_minor_version_upgrade") { + if v, ok := d.GetOk("auto_minor_version_upgrade"); ok { + if v, null, _ := nullable.Bool(v.(string)).ValueBool(); !null { + input.AutoMinorVersionUpgrade = aws.Bool(v) + requestUpdate = true + } + } } if d.HasChange("automatic_failover_enabled") { @@ -768,42 +770,35 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, requestUpdate = true } - if d.HasChange("auto_minor_version_upgrade") { - v := d.Get("auto_minor_version_upgrade") - if v, null, _ := nullable.Bool(v.(string)).ValueBool(); !null { - input.AutoMinorVersionUpgrade = aws.Bool(v) - } + if d.HasChange("description") { + input.ReplicationGroupDescription = aws.String(d.Get("description").(string)) requestUpdate = true } - if d.HasChange("security_group_ids") { - if attr := d.Get("security_group_ids").(*schema.Set); attr.Len() > 0 { - input.SecurityGroupIds = flex.ExpandStringSet(attr) - requestUpdate = true - } + if d.HasChange("engine_version") { + input.EngineVersion = aws.String(d.Get("engine_version").(string)) + requestUpdate = true } - if d.HasChange("security_group_names") { - if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 { - input.CacheSecurityGroupNames = flex.ExpandStringSet(attr) - requestUpdate = true - } + if d.HasChange("ip_discovery") { + input.IpDiscovery = aws.String(d.Get("ip_discovery").(string)) + requestUpdate = true } if d.HasChange("log_delivery_configuration") { - oldLogDeliveryConfig, newLogDeliveryConfig := d.GetChange("log_delivery_configuration") + o, n := d.GetChange("log_delivery_configuration") input.LogDeliveryConfigurations = []*elasticache.LogDeliveryConfigurationRequest{} logTypesToSubmit := make(map[string]bool) - currentLogDeliveryConfig := newLogDeliveryConfig.(*schema.Set).List() + currentLogDeliveryConfig := n.(*schema.Set).List() for _, current := range currentLogDeliveryConfig { logDeliveryConfigurationRequest := expandLogDeliveryConfigurations(current.(map[string]interface{})) logTypesToSubmit[*logDeliveryConfigurationRequest.LogType] = true input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) } - previousLogDeliveryConfig := oldLogDeliveryConfig.(*schema.Set).List() + previousLogDeliveryConfig := o.(*schema.Set).List() for _, previous := range previousLogDeliveryConfig { logDeliveryConfigurationRequest := expandEmptyLogDeliveryConfigurations(previous.(map[string]interface{})) //if something was removed, send an empty request @@ -811,6 +806,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) } } + requestUpdate = true } @@ -824,6 +820,16 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, requestUpdate = true } + if d.HasChange("network_type") { + input.IpDiscovery = 
aws.String(d.Get("network_type").(string)) + requestUpdate = true + } + + if d.HasChange("node_type") { + input.CacheNodeType = aws.String(d.Get("node_type").(string)) + requestUpdate = true + } + if d.HasChange("notification_topic_arn") { input.NotificationTopicArn = aws.String(d.Get("notification_topic_arn").(string)) requestUpdate = true @@ -834,13 +840,22 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, requestUpdate = true } - if d.HasChange("engine_version") { - input.EngineVersion = aws.String(d.Get("engine_version").(string)) - requestUpdate = true + if d.HasChange("security_group_ids") { + if v, ok := d.GetOk("security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + requestUpdate = true + } + } + + if d.HasChange("security_group_names") { + if v, ok := d.GetOk("security_group_names"); ok && v.(*schema.Set).Len() > 0 { + input.CacheSecurityGroupNames = flex.ExpandStringSet(v.(*schema.Set)) + requestUpdate = true + } } if d.HasChange("snapshot_retention_limit") { - // This is a real hack to set the Snapshotting Cluster ID to be the first Cluster in the RG + // This is a real hack to set the Snapshotting Cluster ID to be the first Cluster in the RG. o, _ := d.GetChange("snapshot_retention_limit") if o.(int) == 0 { input.SnapshottingClusterId = aws.String(fmt.Sprintf("%s-001", d.Id())) @@ -855,79 +870,84 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, requestUpdate = true } - if d.HasChange("node_type") { - input.CacheNodeType = aws.String(d.Get("node_type").(string)) + if d.HasChange("transit_encryption_enabled") { + input.TransitEncryptionEnabled = aws.Bool(d.Get("transit_encryption_enabled").(bool)) + requestUpdate = true + } + + if d.HasChange("transit_encryption_mode") { + input.TransitEncryptionMode = aws.String(d.Get("transit_encryption_mode").(string)) requestUpdate = true } if d.HasChange("user_group_ids") { - old, new := d.GetChange("user_group_ids") - newSet := new.(*schema.Set) - oldSet := old.(*schema.Set) - add := newSet.Difference(oldSet) - remove := oldSet.Difference(newSet) + o, n := d.GetChange("user_group_ids") + ns, os := n.(*schema.Set), o.(*schema.Set) + add, del := ns.Difference(os), os.Difference(ns) if add.Len() > 0 { input.UserGroupIdsToAdd = flex.ExpandStringSet(add) requestUpdate = true } - if remove.Len() > 0 { - input.UserGroupIdsToRemove = flex.ExpandStringSet(remove) + if del.Len() > 0 { + input.UserGroupIdsToRemove = flex.ExpandStringSet(del) requestUpdate = true } } - if d.HasChange("transit_encryption_enabled") { - input.TransitEncryptionEnabled = aws.Bool(d.Get("transit_encryption_enabled").(bool)) - requestUpdate = true - } - - if d.HasChange("transit_encryption_mode") { - input.TransitEncryptionMode = aws.String(d.Get("transit_encryption_mode").(string)) - requestUpdate = true - } - if requestUpdate { // tagging may cause this resource to not yet be available, so wait for it to be available - _, err := WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableReadDelay) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) to update: %s", d.Id(), err) + const ( + delay = 30 * time.Second + ) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) update: %s", d.Id(), err) 
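+ // auth_token and auth_token_update_strategy changes are applied separately below via their own ModifyReplicationGroup call.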
} - _, err = conn.ModifyReplicationGroupWithContext(ctx, input) + _, err := conn.ModifyReplicationGroupWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating ElastiCache Replication Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s): %s", d.Id(), err) } - _, err = WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableModifyDelay) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) to update: %s", d.Id(), err) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) update: %s", d.Id(), err) } } if d.HasChanges("auth_token", "auth_token_update_strategy") { - params := &elasticache.ModifyReplicationGroupInput{ + input := &elasticache.ModifyReplicationGroupInput{ ApplyImmediately: aws.Bool(true), - ReplicationGroupId: aws.String(d.Id()), - AuthTokenUpdateStrategy: aws.String(d.Get("auth_token_update_strategy").(string)), AuthToken: aws.String(d.Get("auth_token").(string)), + AuthTokenUpdateStrategy: aws.String(d.Get("auth_token_update_strategy").(string)), + ReplicationGroupId: aws.String(d.Id()), } // tagging may cause this resource to not yet be available, so wait for it to be available - _, err := WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableReadDelay) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) to update: %s", d.Id(), err) + const ( + delay = 0 * time.Second + ) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) update: %s", d.Id(), err) } - _, err = conn.ModifyReplicationGroupWithContext(ctx, params) + _, err := conn.ModifyReplicationGroupWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "changing auth_token for ElastiCache Replication Group (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s) authentication: %s", d.Id(), err) } - _, err = WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableModifyDelay) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) auth_token change: %s", d.Id(), err) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) update: %s", d.Id(), err) + } + } + + if d.HasChange("num_cache_clusters") { + if newCacheClusterCount < oldCacheClusterCount { + if err := decreaseReplicationGroupReplicaCount(ctx, conn, d.Id(), newCacheClusterCount, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } } } } @@ -941,28 +961,43 @@ func resourceReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, v, hasGlobalReplicationGroupID := d.GetOk("global_replication_group_id") if hasGlobalReplicationGroupID { - globalReplicationGroupID := v.(string) - err := DisassociateReplicationGroup(ctx, conn, globalReplicationGroupID, d.Id(), meta.(*conns.AWSClient).Region, 
GlobalReplicationGroupDisassociationReadyTimeout) - if err != nil { - return sdkdiag.AppendErrorf(diags, "disassociating ElastiCache Replication Group (%s) from Global Replication Group (%s): %s", d.Id(), globalReplicationGroupID, err) + if err := disassociateReplicationGroup(ctx, conn, v.(string), d.Id(), meta.(*conns.AWSClient).Region, d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } - var finalSnapshotID = d.Get("final_snapshot_identifier").(string) - err := deleteReplicationGroup(ctx, d.Id(), conn, finalSnapshotID, d.Timeout(schema.TimeoutDelete)) - if err != nil { + input := &elasticache.DeleteReplicationGroupInput{ + ReplicationGroupId: aws.String(d.Id()), + } + + if v, ok := d.GetOk("final_snapshot_identifier"); ok { + input.FinalSnapshotIdentifier = aws.String(v.(string)) + } + + // Cache Cluster is creating/deleting or Replication Group is snapshotting + // InvalidReplicationGroupState: Cache cluster tf-acc-test-uqhe-003 is not in a valid state to be deleted + const ( + timeout = 10 * time.Minute // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete. + ) + log.Printf("[INFO] Deleting ElastiCache Replication Group: %s", d.Id()) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { + return conn.DeleteReplicationGroupWithContext(ctx, input) + }, elasticache.ErrCodeInvalidReplicationGroupStateFault) + + switch { + case tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault): + case err != nil: return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Replication Group (%s): %s", d.Id(), err) + default: + if _, err := waitReplicationGroupDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) delete: %s", d.Id(), err) + } } if hasGlobalReplicationGroupID { - paramGroupName := d.Get("parameter_group_name").(string) - if paramGroupName != "" { - err := deleteParameterGroup(ctx, conn, paramGroupName) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { - return diags - } - if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Parameter Group (%s): %s", d.Id(), err) + if paramGroupName := d.Get("parameter_group_name").(string); paramGroupName != "" { + if err := deleteParameterGroup(ctx, conn, paramGroupName); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } } @@ -970,99 +1005,45 @@ func resourceReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, return diags } -func DisassociateReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, id, region string, readyTimeout time.Duration) error { +func disassociateReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID, region string, timeout time.Duration) error { input := &elasticache.DisassociateGlobalReplicationGroupInput{ GlobalReplicationGroupId: aws.String(globalReplicationGroupID), - ReplicationGroupId: aws.String(id), + ReplicationGroupId: aws.String(replicationGroupID), ReplicationGroupRegion: aws.String(region), } - err := retry.RetryContext(ctx, readyTimeout, func() *retry.RetryError { - _, err := conn.DisassociateGlobalReplicationGroupWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) { - return nil - } - if tfawserr.ErrCodeEquals(err, 
elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault) { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { + return conn.DisassociateGlobalReplicationGroupWithContext(ctx, input) + }, elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault) + + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) { return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DisassociateGlobalReplicationGroupWithContext(ctx, input) } + if tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidParameterValueException, "is not associated with Global Replication Group") { return nil } - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault) { - return fmt.Errorf("tried for %s: %w", readyTimeout.String(), err) - } if err != nil { - return err + return fmt.Errorf("disassociating ElastiCache Replication Group (%s) from Global Replication Group (%s): %w", replicationGroupID, globalReplicationGroupID, err) } - _, err = waitGlobalReplicationGroupMemberDetached(ctx, conn, globalReplicationGroupID, id) - if err != nil { - return fmt.Errorf("waiting for completion: %w", err) + if _, err := waitGlobalReplicationGroupMemberDetached(ctx, conn, globalReplicationGroupID, replicationGroupID, timeout); err != nil { + return fmt.Errorf("waiting for ElastiCache Replication Group (%s) detach: %w", replicationGroupID, err) } return nil } -func deleteReplicationGroup(ctx context.Context, replicationGroupID string, conn *elasticache.ElastiCache, finalSnapshotID string, timeout time.Duration) error { - input := &elasticache.DeleteReplicationGroupInput{ - ReplicationGroupId: aws.String(replicationGroupID), - } - if finalSnapshotID != "" { - input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) - } - - // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete - err := retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteReplicationGroupWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault) { - return nil - } - // Cache Cluster is creating/deleting or Replication Group is snapshotting - // InvalidReplicationGroupState: Cache cluster tf-acc-test-uqhe-003 is not in a valid state to be deleted - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeInvalidReplicationGroupStateFault) { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteReplicationGroupWithContext(ctx, input) - } - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault) { - return nil - } - if err != nil { - return err - } - - _, err = WaitReplicationGroupDeleted(ctx, conn, replicationGroupID, timeout) - - return err -} - func modifyReplicationGroupShardConfiguration(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData) error { if d.HasChange("num_node_groups") { - err := modifyReplicationGroupShardConfigurationNumNodeGroups(ctx, conn, d, "num_node_groups") - if err != nil { + if err := modifyReplicationGroupShardConfigurationNumNodeGroups(ctx, conn, d, "num_node_groups"); err != nil { return err } } if d.HasChange("replicas_per_node_group") { - err := modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx, conn, d, "replicas_per_node_group") - if err 
!= nil { + if err := modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx, conn, d, "replicas_per_node_group"); err != nil { return err } } @@ -1072,35 +1053,36 @@ func modifyReplicationGroupShardConfiguration(ctx context.Context, conn *elastic func modifyReplicationGroupShardConfigurationNumNodeGroups(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData, argument string) error { o, n := d.GetChange(argument) - oldNumNodeGroups := o.(int) - newNumNodeGroups := n.(int) + oldNodeGroupCount, newNodeGroupCount := o.(int), n.(int) input := &elasticache.ModifyReplicationGroupShardConfigurationInput{ ApplyImmediately: aws.Bool(true), - NodeGroupCount: aws.Int64(int64(newNumNodeGroups)), + NodeGroupCount: aws.Int64(int64(newNodeGroupCount)), ReplicationGroupId: aws.String(d.Id()), } - if oldNumNodeGroups > newNumNodeGroups { + if oldNodeGroupCount > newNodeGroupCount { // Node Group IDs are 1 indexed: 0001 through 0015 // Loop from highest old ID until we reach highest new ID nodeGroupsToRemove := []string{} - for i := oldNumNodeGroups; i > newNumNodeGroups; i-- { + for i := oldNodeGroupCount; i > newNodeGroupCount; i-- { nodeGroupID := fmt.Sprintf("%04d", i) nodeGroupsToRemove = append(nodeGroupsToRemove, nodeGroupID) } input.NodeGroupsToRemove = aws.StringSlice(nodeGroupsToRemove) } - log.Printf("[DEBUG] Modifying ElastiCache Replication Group (%s) shard configuration: %s", d.Id(), input) _, err := conn.ModifyReplicationGroupShardConfigurationWithContext(ctx, input) + if err != nil { - return fmt.Errorf("modifying ElastiCache Replication Group shard configuration: %w", err) + return fmt.Errorf("modifying ElastiCache Replication Group (%s) shard configuration: %w", d.Id(), err) } - _, err = WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableModifyDelay) - if err != nil { - return fmt.Errorf("waiting for ElastiCache Replication Group (%s) shard reconfiguration completion: %w", d.Id(), err) + const ( + delay = 30 * time.Second + ) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return fmt.Errorf("waiting for ElastiCache Replication Group (%s) update: %w", d.Id(), err) } return nil @@ -1108,94 +1090,284 @@ func modifyReplicationGroupShardConfigurationNumNodeGroups(ctx context.Context, func modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData, argument string) error { o, n := d.GetChange(argument) - oldReplicas := o.(int) - newReplicas := n.(int) + oldReplicaCount, newReplicaCount := o.(int), n.(int) - if newReplicas > oldReplicas { + if newReplicaCount > oldReplicaCount { input := &elasticache.IncreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newReplicas)), + NewReplicaCount: aws.Int64(int64(newReplicaCount)), ReplicationGroupId: aws.String(d.Id()), } + _, err := conn.IncreaseReplicaCountWithContext(ctx, input) + if err != nil { - return fmt.Errorf("adding ElastiCache Replication Group (%s) replicas: %w", d.Id(), err) + return fmt.Errorf("increasing ElastiCache Replication Group (%s) replica count (%d): %w", d.Id(), newReplicaCount, err) } - _, err = WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableModifyDelay) - if err != nil { - return fmt.Errorf("waiting for ElastiCache Replication Group (%s) replica addition: %w", d.Id(), err) + + const ( + delay = 30 * 
time.Second + ) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return fmt.Errorf("waiting for ElastiCache Replication Group (%s) update: %w", d.Id(), err) } - } else { + } else if newReplicaCount < oldReplicaCount { input := &elasticache.DecreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newReplicas)), + NewReplicaCount: aws.Int64(int64(newReplicaCount)), ReplicationGroupId: aws.String(d.Id()), } + _, err := conn.DecreaseReplicaCountWithContext(ctx, input) + if err != nil { - return fmt.Errorf("removing ElastiCache Replication Group (%s) replicas: %w", d.Id(), err) + return fmt.Errorf("decreasing ElastiCache Replication Group (%s) replica count (%d): %w", d.Id(), newReplicaCount, err) } - _, err = WaitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), replicationGroupAvailableModifyDelay) - if err != nil { - return fmt.Errorf("waiting for ElastiCache Replication Group (%s) replica removal: %w", d.Id(), err) + + const ( + delay = 30 * time.Second + ) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return fmt.Errorf("waiting for ElastiCache Replication Group (%s) update: %w", d.Id(), err) } } return nil } -func modifyReplicationGroupNumCacheClusters(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData, argument string) error { - o, n := d.GetChange(argument) - oldNumberCacheClusters := o.(int) - newNumberCacheClusters := n.(int) - - var err error - if newNumberCacheClusters > oldNumberCacheClusters { - err = increaseReplicationGroupNumCacheClusters(ctx, conn, d.Id(), newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate)) - } else if newNumberCacheClusters < oldNumberCacheClusters { - err = decreaseReplicationGroupNumCacheClusters(ctx, conn, d.Id(), newNumberCacheClusters, d.Timeout(schema.TimeoutUpdate)) - } - return err -} - -func increaseReplicationGroupNumCacheClusters(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, newNumberCacheClusters int, timeout time.Duration) error { +func increaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, newReplicaCount int, timeout time.Duration) error { input := &elasticache.IncreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newNumberCacheClusters - 1)), + NewReplicaCount: aws.Int64(int64(newReplicaCount - 1)), ReplicationGroupId: aws.String(replicationGroupID), } + _, err := conn.IncreaseReplicaCountWithContext(ctx, input) + if err != nil { - return fmt.Errorf("adding ElastiCache Replication Group (%s) replicas: %w", replicationGroupID, err) + return fmt.Errorf("increasing ElastiCache Replication Group (%s) replica count (%d): %w", replicationGroupID, newReplicaCount-1, err) } - _, err = WaitReplicationGroupMemberClustersAvailable(ctx, conn, replicationGroupID, timeout) - if err != nil { - return fmt.Errorf("waiting for ElastiCache Replication Group (%s) replica addition: %w", replicationGroupID, err) + if _, err := waitReplicationGroupMemberClustersAvailable(ctx, conn, replicationGroupID, timeout); err != nil { + return fmt.Errorf("waiting for ElastiCache Replication Group (%s) member cluster update: %w", replicationGroupID, err) } return nil } -func decreaseReplicationGroupNumCacheClusters(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, 
newNumberCacheClusters int, timeout time.Duration) error { +func decreaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, newReplicaCount int, timeout time.Duration) error { input := &elasticache.DecreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newNumberCacheClusters - 1)), + NewReplicaCount: aws.Int64(int64(newReplicaCount - 1)), ReplicationGroupId: aws.String(replicationGroupID), } + _, err := conn.DecreaseReplicaCountWithContext(ctx, input) + if err != nil { - return fmt.Errorf("removing ElastiCache Replication Group (%s) replicas: %w", replicationGroupID, err) + return fmt.Errorf("decreasing ElastiCache Replication Group (%s) replica count (%d): %w", replicationGroupID, newReplicaCount-1, err) } - _, err = WaitReplicationGroupMemberClustersAvailable(ctx, conn, replicationGroupID, timeout) - if err != nil { - return fmt.Errorf("waiting for ElastiCache Replication Group (%s) replica removal: %w", replicationGroupID, err) + if _, err := waitReplicationGroupMemberClustersAvailable(ctx, conn, replicationGroupID, timeout); err != nil { + return fmt.Errorf("waiting for ElastiCache Replication Group (%s) member cluster update: %w", replicationGroupID, err) } return nil } +func findReplicationGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.ReplicationGroup, error) { + input := &elasticache.DescribeReplicationGroupsInput{ + ReplicationGroupId: aws.String(id), + } + + return findReplicationGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.ReplicationGroup]()) +} + +func findReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeReplicationGroupsInput, filter tfslices.Predicate[*elasticache.ReplicationGroup]) (*elasticache.ReplicationGroup, error) { + output, err := findReplicationGroups(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findReplicationGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeReplicationGroupsInput, filter tfslices.Predicate[*elasticache.ReplicationGroup]) ([]*elasticache.ReplicationGroup, error) { + var output []*elasticache.ReplicationGroup + + err := conn.DescribeReplicationGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.ReplicationGroups { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + +func statusReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findReplicationGroupByID(ctx, conn, replicationGroupID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } +} + +const ( + replicationGroupStatusAvailable = "available" + replicationGroupStatusCreateFailed = "create-failed" + replicationGroupStatusCreating = "creating" + replicationGroupStatusDeleting = "deleting" + replicationGroupStatusModifying = "modifying" + 
replicationGroupStatusSnapshotting = "snapshotting" +) + +func waitReplicationGroupAvailable(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration, delay time.Duration) (*elasticache.ReplicationGroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + replicationGroupStatusCreating, + replicationGroupStatusModifying, + replicationGroupStatusSnapshotting, + }, + Target: []string{replicationGroupStatusAvailable}, + Refresh: statusReplicationGroup(ctx, conn, replicationGroupID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: delay, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*elasticache.ReplicationGroup); ok { + return output, err + } + + return nil, err +} + +func waitReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + replicationGroupStatusCreating, + replicationGroupStatusAvailable, + replicationGroupStatusDeleting, + }, + Target: []string{}, + Refresh: statusReplicationGroup(ctx, conn, replicationGroupID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*elasticache.ReplicationGroup); ok { + return output, err + } + + return nil, err +} + +func findReplicationGroupMemberClustersByID(ctx context.Context, conn *elasticache.ElastiCache, id string) ([]*elasticache.CacheCluster, error) { + rg, err := findReplicationGroupByID(ctx, conn, id) + + if err != nil { + return nil, err + } + + ids := aws.StringValueSlice(rg.MemberClusters) + clusters, err := findCacheClusters(ctx, conn, &elasticache.DescribeCacheClustersInput{}, func(v *elasticache.CacheCluster) bool { + return slices.Contains(ids, aws.StringValue(v.CacheClusterId)) + }) + + if err != nil { + return nil, err + } + + if len(clusters) == 0 { + return nil, tfresource.NewEmptyResultError(nil) + } + + return clusters, nil +} + +// statusReplicationGroupMemberClusters fetches the Replication Group's Member Clusters and either "available" or the first non-"available" status. +// NOTE: This function assumes that the intended end-state is to have all member clusters in "available" status. 
+func statusReplicationGroupMemberClusters(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findReplicationGroupMemberClustersByID(ctx, conn, replicationGroupID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + status := cacheClusterStatusAvailable + for _, v := range output { + if clusterStatus := aws.StringValue(v.CacheClusterStatus); clusterStatus != cacheClusterStatusAvailable { + status = clusterStatus + break + } + } + + return output, status, nil + } +} + +func waitReplicationGroupMemberClustersAvailable(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) ([]*elasticache.CacheCluster, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{ + cacheClusterStatusCreating, + cacheClusterStatusDeleting, + cacheClusterStatusModifying, + cacheClusterStatusSnapshotting, + }, + Target: []string{cacheClusterStatusAvailable}, + Refresh: statusReplicationGroupMemberClusters(ctx, conn, replicationGroupID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.([]*elasticache.CacheCluster); ok { + return output, err + } + + return nil, err +} + var validateReplicationGroupID schema.SchemaValidateFunc = validation.All( validation.StringLenBetween(1, 40), validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z-]+$`), "must contain only alphanumeric characters and hyphens"), diff --git a/internal/service/elasticache/replication_group_data_source.go b/internal/service/elasticache/replication_group_data_source.go index 90bf3b8c4a9..d80cc096907 100644 --- a/internal/service/elasticache/replication_group_data_source.go +++ b/internal/service/elasticache/replication_group_data_source.go @@ -15,18 +15,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKDataSource("aws_elasticache_replication_group") -func DataSourceReplicationGroup() *schema.Resource { +// @SDKDataSource("aws_elasticache_replication_group", name="Replication Group") +func dataSourceReplicationGroup() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReplicationGroupRead, Schema: map[string]*schema.Schema{ - "replication_group_id": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validateReplicationGroupID, - }, "arn": { Type: schema.TypeString, Computed: true, @@ -39,23 +35,48 @@ func DataSourceReplicationGroup() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "configuration_endpoint_address": { + Type: schema.TypeString, + Computed: true, + }, "description": { Type: schema.TypeString, Computed: true, }, - "port": { - Type: schema.TypeInt, + "log_delivery_configuration": { + Type: schema.TypeSet, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination": { + Type: schema.TypeString, + Computed: true, + }, + "destination_type": { + Type: schema.TypeString, + Computed: true, + }, + "log_format": { + Type: schema.TypeString, + Computed: true, + }, + "log_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, - 
"configuration_endpoint_address": { - Type: schema.TypeString, + "member_clusters": { + Type: schema.TypeSet, Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "primary_endpoint_address": { - Type: schema.TypeString, + "multi_az_enabled": { + Type: schema.TypeBool, Computed: true, }, - "reader_endpoint_address": { + "node_type": { Type: schema.TypeString, Computed: true, }, @@ -67,55 +88,35 @@ func DataSourceReplicationGroup() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "member_clusters": { - Type: schema.TypeSet, + "port": { + Type: schema.TypeInt, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, }, - "multi_az_enabled": { - Type: schema.TypeBool, + "primary_endpoint_address": { + Type: schema.TypeString, Computed: true, }, - "node_type": { + "reader_endpoint_address": { Type: schema.TypeString, Computed: true, }, + "replication_group_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateReplicationGroupID, + }, "replicas_per_node_group": { Type: schema.TypeInt, Computed: true, }, - "log_delivery_configuration": { - Type: schema.TypeSet, + "snapshot_retention_limit": { + Type: schema.TypeInt, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_type": { - Type: schema.TypeString, - Computed: true, - }, - "destination": { - Type: schema.TypeString, - Computed: true, - }, - "log_format": { - Type: schema.TypeString, - Computed: true, - }, - "log_type": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, }, "snapshot_window": { Type: schema.TypeString, Computed: true, }, - "snapshot_retention_limit": { - Type: schema.TypeInt, - Computed: true, - }, }, } } @@ -126,9 +127,10 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, groupID := d.Get("replication_group_id").(string) - rg, err := FindReplicationGroupByID(ctx, conn, groupID) + rg, err := findReplicationGroupByID(ctx, conn, groupID) + if err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): %s", groupID, err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Replication Group", err)) } d.SetId(aws.StringValue(rg.ReplicationGroupId)) @@ -179,5 +181,6 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, d.Set("log_delivery_configuration", flattenLogDeliveryConfigurations(rg.LogDeliveryConfigurations)) d.Set("snapshot_window", rg.SnapshotWindow) d.Set("snapshot_retention_limit", rg.SnapshotRetentionLimit) + return diags } diff --git a/internal/service/elasticache/replication_group_test.go b/internal/service/elasticache/replication_group_test.go index 40b05741c53..5ca1f9c40bb 100644 --- a/internal/service/elasticache/replication_group_test.go +++ b/internal/service/elasticache/replication_group_test.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfelasticache "github.com/hashicorp/terraform-provider-aws/internal/service/elasticache" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -2263,7 +2264,7 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupID_basic(t *testin Config: testAccReplicationGroupConfig_globalIDBasic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, 
resourceName, &rg), - testAccCheckReplicationGroupParameterGroup(ctx, &rg, &pg), + testAccCheckReplicationGroupParameterGroupExists(ctx, &rg, &pg), resource.TestCheckResourceAttrPair(resourceName, "global_replication_group_id", "aws_elasticache_global_replication_group.test", "global_replication_group_id"), resource.TestCheckResourceAttrPair(resourceName, "node_type", primaryGroupResourceName, "node_type"), resource.TestCheckResourceAttrPair(resourceName, "engine", primaryGroupResourceName, "engine"), @@ -2316,7 +2317,7 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupID_full(t *testing Config: testAccReplicationGroupConfig_globalIDFull(rName, initialNumCacheClusters), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg1), - testAccCheckReplicationGroupParameterGroup(ctx, &rg1, &pg1), + testAccCheckReplicationGroupParameterGroupExists(ctx, &rg1, &pg1), resource.TestCheckResourceAttrPair(resourceName, "global_replication_group_id", "aws_elasticache_global_replication_group.test", "global_replication_group_id"), resource.TestCheckResourceAttrPair(resourceName, "node_type", primaryGroupResourceName, "node_type"), resource.TestCheckResourceAttrPair(resourceName, "engine", primaryGroupResourceName, "engine"), @@ -2344,7 +2345,7 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupID_full(t *testing Config: testAccReplicationGroupConfig_globalIDFull(rName, updatedNumCacheClusters), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg2), - testAccCheckReplicationGroupParameterGroup(ctx, &rg2, &pg2), + testAccCheckReplicationGroupParameterGroupExists(ctx, &rg2, &pg2), resource.TestCheckResourceAttr(resourceName, "num_cache_clusters", strconv.Itoa(updatedNumCacheClusters)), ), }, @@ -2414,7 +2415,7 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupIDClusterMode_basi Config: testAccReplicationGroupConfig_globalIDClusterMode(rName, 2, 1), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg1), - testAccCheckReplicationGroupParameterGroup(ctx, &rg1, &pg1), + testAccCheckReplicationGroupParameterGroupExists(ctx, &rg1, &pg1), resource.TestCheckResourceAttr(resourceName, "num_node_groups", "2"), resource.TestCheckResourceAttr(resourceName, "replicas_per_node_group", "1"), resource.TestCheckResourceAttr(resourceName, "automatic_failover_enabled", "true"), @@ -2435,7 +2436,7 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupIDClusterMode_basi Config: testAccReplicationGroupConfig_globalIDClusterMode(rName, 1, 3), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg2), - testAccCheckReplicationGroupParameterGroup(ctx, &rg2, &pg2), + testAccCheckReplicationGroupParameterGroupExists(ctx, &rg2, &pg2), resource.TestCheckResourceAttr(resourceName, "num_node_groups", "2"), resource.TestCheckResourceAttr(resourceName, "replicas_per_node_group", "3"), @@ -2730,17 +2731,15 @@ func testAccCheckReplicationGroupExists(ctx context.Context, n string, v *elasti return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No replication group ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - rg, err := tfelasticache.FindReplicationGroupByID(ctx, conn, rs.Primary.ID) + + output, err := tfelasticache.FindReplicationGroupByID(ctx, conn, rs.Primary.ID) + if err != nil { - return 
fmt.Errorf("ElastiCache error: %w", err) + return err } - *v = *rg + *v = *output return nil } @@ -2754,79 +2753,90 @@ func testAccCheckReplicationGroupDestroy(ctx context.Context) resource.TestCheck if rs.Type != "aws_elasticache_replication_group" { continue } + _, err := tfelasticache.FindReplicationGroupByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { continue } + if err != nil { return err } + return fmt.Errorf("ElastiCache Replication Group (%s) still exists", rs.Primary.ID) } return nil } } -func testAccCheckReplicationGroupParameterGroup(ctx context.Context, rg *elasticache.ReplicationGroup, pg *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testAccCheckReplicationGroupParameterGroupExists(ctx context.Context, rg *elasticache.ReplicationGroup, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - cacheCluster := rg.NodeGroups[0].NodeGroupMembers[0] - cluster, err := tfelasticache.FindCacheClusterByID(ctx, conn, aws.StringValue(cacheCluster.CacheClusterId)) + cacheClusterID := aws.StringValue(rg.NodeGroups[0].NodeGroupMembers[0].CacheClusterId) + cluster, err := tfelasticache.FindCacheClusterByID(ctx, conn, cacheClusterID) + if err != nil { - return fmt.Errorf("could not retrieve cache cluster (%s): %w", aws.StringValue(cacheCluster.CacheClusterId), err) + return fmt.Errorf("reading ElastiCache Cluster (%s): %w", cacheClusterID, err) } - paramGroupName := aws.StringValue(cluster.CacheParameterGroup.CacheParameterGroupName) + name := aws.StringValue(cluster.CacheParameterGroup.CacheParameterGroupName) + output, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, name) - group, err := tfelasticache.FindParameterGroupByName(ctx, conn, paramGroupName) if err != nil { - return fmt.Errorf("error retrieving parameter group (%s): %w", paramGroupName, err) + return fmt.Errorf("reading ElastiCache Parameter Group (%s): %w", name, err) } - *pg = *group + *v = *output return nil } } -func testAccCheckGlobalReplicationGroupMemberParameterGroupDestroy(ctx context.Context, pg *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testAccCheckGlobalReplicationGroupMemberParameterGroupDestroy(ctx context.Context, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - paramGroupName := aws.StringValue(pg.CacheParameterGroupName) + name := aws.StringValue(v.CacheParameterGroupName) + _, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, name) - _, err := tfelasticache.FindParameterGroupByName(ctx, conn, paramGroupName) if tfresource.NotFound(err) { return nil } + if err != nil { - return fmt.Errorf("error finding parameter group (%s): %w", paramGroupName, err) + return err } - return fmt.Errorf("Cache Parameter Group (%s) still exists", paramGroupName) + + return fmt.Errorf("ElastiCache Parameter Group (%s) still exists", name) } } -func testAccCheckReplicationGroupUserGroup(ctx context.Context, resourceName, userGroupID string) resource.TestCheckFunc { +func testAccCheckReplicationGroupUserGroup(ctx context.Context, n, userGroupID string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := 
acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - rg, err := tfelasticache.FindReplicationGroupByID(ctx, conn, rs.Primary.ID) + + id := rs.Primary.ID + output, err := tfelasticache.FindReplicationGroupByID(ctx, conn, id) if err != nil { return err } - if len(rg.UserGroupIds) < 1 { - return fmt.Errorf("ElastiCache Replication Group (%s) was not assigned any usergroups", resourceName) + + if len(output.UserGroupIds) < 1 { + return fmt.Errorf("ElastiCache Replication Group (%s) was not assigned any User Groups", id) } - if *rg.UserGroupIds[0] != userGroupID { - return fmt.Errorf("ElastiCache Replication Group (%s) was not assigned usergroup (%s), usergroup was (%s) instead", resourceName, userGroupID, *rg.UserGroupIds[0]) + if v := aws.StringValue(output.UserGroupIds[0]); v != userGroupID { + return fmt.Errorf("ElastiCache Replication Group (%s) was not assigned User Group (%s), User Group was (%s) instead", n, userGroupID, v) } + return nil } } @@ -2902,15 +2912,21 @@ func testCheckRedisParameterGroupDefault(ctx context.Context, version *elasticac return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - parameterGroup, err := tfelasticache.FindParameterGroupByFilter(ctx, conn, - tfelasticache.FilterRedisParameterGroupFamily(aws.StringValue(version.CacheParameterGroupFamily)), - tfelasticache.FilterRedisParameterGroupNameDefault, - ) + output, err := tfelasticache.FindCacheParameterGroup(ctx, conn, &elasticache.DescribeCacheParameterGroupsInput{}, tfslices.PredicateAnd( + func(v *elasticache.CacheParameterGroup) bool { + return aws.StringValue(v.CacheParameterGroupFamily) == aws.StringValue(version.CacheParameterGroupFamily) + }, + func(v *elasticache.CacheParameterGroup) bool { + name := aws.StringValue(v.CacheParameterGroupName) + return strings.HasPrefix(name, "default.") && !strings.HasSuffix(name, ".cluster.on") + }, + )) + if err != nil { return err } - *v = *parameterGroup + *v = *output return nil } @@ -2943,15 +2959,21 @@ func testCheckRedisParameterGroupClusterEnabledDefault(ctx context.Context, vers return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - parameterGroup, err := tfelasticache.FindParameterGroupByFilter(ctx, conn, - tfelasticache.FilterRedisParameterGroupFamily(aws.StringValue(version.CacheParameterGroupFamily)), - tfelasticache.FilterRedisParameterGroupNameClusterEnabledDefault, - ) + output, err := tfelasticache.FindCacheParameterGroup(ctx, conn, &elasticache.DescribeCacheParameterGroupsInput{}, tfslices.PredicateAnd( + func(v *elasticache.CacheParameterGroup) bool { + return aws.StringValue(v.CacheParameterGroupFamily) == aws.StringValue(version.CacheParameterGroupFamily) + }, + func(v *elasticache.CacheParameterGroup) bool { + name := aws.StringValue(v.CacheParameterGroupName) + return strings.HasPrefix(name, "default.") && strings.HasSuffix(name, ".cluster.on") + }, + )) + if err != nil { return err } - *v = *parameterGroup + *v = *output return nil } @@ -4369,7 +4391,10 @@ func resourceReplicationGroupModify(ctx context.Context, conn *elasticache.Elast return fmt.Errorf("error requesting modification: %w", err) } - _, err = tfelasticache.WaitReplicationGroupAvailable(ctx, conn, aws.StringValue(input.ReplicationGroupId), timeout, tfelasticache.ReplicationGroupAvailableModifyDelay) + const ( + delay = 30 * time.Second + ) + _, err = tfelasticache.WaitReplicationGroupAvailable(ctx, conn, aws.StringValue(input.ReplicationGroupId), 
timeout, delay) if err != nil { return fmt.Errorf("error waiting for modification: %w", err) } diff --git a/internal/service/elasticache/serverless_cache.go b/internal/service/elasticache/serverless_cache.go index 4322d2c55b6..a1fab400f69 100644 --- a/internal/service/elasticache/serverless_cache.go +++ b/internal/service/elasticache/serverless_cache.go @@ -5,8 +5,10 @@ package elasticache import ( "context" + "fmt" "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticache" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" @@ -23,7 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" @@ -46,17 +48,13 @@ func newServerlessCacheResource(context.Context) (resource.ResourceWithConfigure return r, nil } -const ( - ResNameServerlessCache = "Serverless Cache" -) - type serverlessCacheResource struct { framework.ResourceWithConfigure framework.WithImportByID framework.WithTimeouts } -func (r *serverlessCacheResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { +func (*serverlessCacheResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { response.TypeName = "aws_elasticache_serverless_cache" } @@ -274,28 +272,23 @@ func (r *serverlessCacheResource) Create(ctx context.Context, request resource.C _, err := conn.CreateServerlessCache(ctx, input) if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionCreating, ResNameServerlessCache, data.ServerlessCacheName.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError("creating ElastiCache Serverless Cache", err.Error()) + return } // Set values for unknowns. data.setID() - createTimeout := r.CreateTimeout(ctx, data.Timeouts) - out, err := waitServerlessCacheAvailable(ctx, conn, data.ID.ValueString(), createTimeout) + output, err := waitServerlessCacheAvailable(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionWaitingForCreation, ResNameServerlessCache, data.ID.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("waiting for ElastiCache Serverless Cache (%s) create", data.ID.ValueString()), err.Error()) + return } - response.Diagnostics.Append(fwflex.Flatten(ctx, out, &data)...) + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) 
if response.Diagnostics.HasError() { return } @@ -318,7 +311,7 @@ func (r *serverlessCacheResource) Read(ctx context.Context, request resource.Rea conn := r.Meta().ElastiCacheClient(ctx) - out, err := FindServerlessCacheByID(ctx, conn, data.ID.ValueString()) + output, err := findServerlessCacheByID(ctx, conn, data.ID.ValueString()) if tfresource.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) @@ -328,14 +321,12 @@ func (r *serverlessCacheResource) Read(ctx context.Context, request resource.Rea } if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionSetting, ResNameServerlessCache, data.ID.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading ElastiCache Serverless Cache (%s)", data.ID.ValueString()), err.Error()) + return } - response.Diagnostics.Append(fwflex.Flatten(ctx, out, &data)...) + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) if response.Diagnostics.HasError() { return } @@ -366,38 +357,29 @@ func (r *serverlessCacheResource) Update(ctx context.Context, request resource.U _, err := conn.ModifyServerlessCache(ctx, input) if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionUpdating, ResNameServerlessCache, old.ServerlessCacheName.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("updating ElastiCache Serverless Cache (%s)", new.ID.ValueString()), err.Error()) + return } - updateTimeout := r.UpdateTimeout(ctx, new.Timeouts) - _, err = waitServerlessCacheAvailable(ctx, conn, old.ServerlessCacheName.ValueString(), updateTimeout) + if _, err := waitServerlessCacheAvailable(ctx, conn, old.ServerlessCacheName.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for ElastiCache Serverless Cache (%s) update", new.ID.ValueString()), err.Error()) - if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionWaitingForUpdate, ResNameServerlessCache, new.ServerlessCacheName.ValueString(), err), - err.Error(), - ) return } } // AWS returns null values for certain values that are available on redis only. // always set these values to the state value to avoid unnecessary diff failures on computed values. - out, err := FindServerlessCacheByID(ctx, conn, old.ID.ValueString()) + output, err := findServerlessCacheByID(ctx, conn, old.ID.ValueString()) if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionUpdating, ResNameServerlessCache, old.ServerlessCacheName.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading ElastiCache Serverless Cache (%s)", old.ID.ValueString()), err.Error()) + return } - response.Diagnostics.Append(fwflex.Flatten(ctx, out, &new)...) + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &new)...) 
if response.Diagnostics.HasError() { return } @@ -425,28 +407,21 @@ func (r *serverlessCacheResource) Delete(ctx context.Context, request resource.D _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (interface{}, error) { return conn.DeleteServerlessCache(ctx, input) - }, "DependencyViolation") + }, errCodeDependencyViolation) if errs.IsA[*awstypes.ServerlessCacheNotFoundFault](err) { return } if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionDeleting, ResNameServerlessCache, data.ID.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting ElastiCache Serverless Cache (%s)", data.ID.ValueString()), err.Error()) + return } - deleteTimeout := r.DeleteTimeout(ctx, data.Timeouts) - _, err = waitServerlessCacheDeleted(ctx, conn, data.ID.ValueString(), deleteTimeout) + if _, err := waitServerlessCacheDeleted(ctx, conn, data.ID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for ElastiCache Serverless Cache (%s) delete", data.ID.ValueString()), err.Error()) - if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.ElastiCache, create.ErrActionWaitingForDeletion, ResNameServerlessCache, data.ID.ValueString(), err), - err.Error(), - ) return } } @@ -455,6 +430,116 @@ func (r *serverlessCacheResource) ModifyPlan(ctx context.Context, request resour r.SetTagsAll(ctx, request, response) } +func findServerlessCache(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeServerlessCachesInput) (*awstypes.ServerlessCache, error) { + output, err := findServerlessCaches(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findServerlessCaches(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeServerlessCachesInput) ([]awstypes.ServerlessCache, error) { + var output []awstypes.ServerlessCache + + pages := elasticache.NewDescribeServerlessCachesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ServerlessCacheNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.ServerlessCaches...) 
+ } + + return output, nil +} + +func findServerlessCacheByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.ServerlessCache, error) { + input := &elasticache.DescribeServerlessCachesInput{ + ServerlessCacheName: aws.String(id), + } + + return findServerlessCache(ctx, conn, input) +} + +func statusServerlessCache(ctx context.Context, conn *elasticache.Client, cacheClusterID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findServerlessCacheByID(ctx, conn, cacheClusterID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + + return output, aws.ToString(output.Status), nil + } +} + +const ( + serverlessCacheStatusAvailable = "available" + serverlessCacheStatusCreating = "creating" + serverlessCacheStatusDeleting = "deleting" + serverlessCacheStatusModifying = "modifying" +) + +func waitServerlessCacheAvailable(ctx context.Context, conn *elasticache.Client, cacheClusterID string, timeout time.Duration) (*awstypes.ServerlessCache, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + serverlessCacheStatusCreating, + serverlessCacheStatusDeleting, + serverlessCacheStatusModifying, + }, + Target: []string{serverlessCacheStatusAvailable}, + Refresh: statusServerlessCache(ctx, conn, cacheClusterID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ServerlessCache); ok { + return output, err + } + + return nil, err +} + +func waitServerlessCacheDeleted(ctx context.Context, conn *elasticache.Client, cacheClusterID string, timeout time.Duration) (*awstypes.ServerlessCache, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + serverlessCacheStatusCreating, + serverlessCacheStatusDeleting, + serverlessCacheStatusModifying, + }, + Target: []string{}, + Refresh: statusServerlessCache(ctx, conn, cacheClusterID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ServerlessCache); ok { + return output, err + } + + return nil, err +} + type serverlessCacheResourceModel struct { ARN types.String `tfsdk:"arn"` CacheUsageLimits fwtypes.ListNestedObjectValueOf[cacheUsageLimitsModel] `tfsdk:"cache_usage_limits"` diff --git a/internal/service/elasticache/serverless_cache_test.go b/internal/service/elasticache/serverless_cache_test.go index 6b19c975c7f..99747aaf98c 100644 --- a/internal/service/elasticache/serverless_cache_test.go +++ b/internal/service/elasticache/serverless_cache_test.go @@ -415,24 +415,22 @@ func TestAccElastiCacheServerlessCache_tags(t *testing.T) { }) } -func testAccCheckServerlessCacheExists(ctx context.Context, resourceName string, v *awstypes.ServerlessCache) resource.TestCheckFunc { +func testAccCheckServerlessCacheExists(ctx context.Context, n string, v *awstypes.ServerlessCache) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("no ElastiCache Serverless ID is set") + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) - out, err := tfelasticache.FindServerlessCacheByID(ctx, conn, rs.Primary.ID) 
+ + output, err := tfelasticache.FindServerlessCacheByID(ctx, conn, rs.Primary.ID) + if err != nil { - return fmt.Errorf("retrieving ElastiCache Serverlesss (%s): %w", rs.Primary.ID, err) + return err } - *v = out + *v = *output return nil } @@ -448,6 +446,7 @@ func testAccCheckServerlessCacheDestroy(ctx context.Context) resource.TestCheckF } _, err := tfelasticache.FindServerlessCacheByID(ctx, conn, rs.Primary.ID) + if tfresource.NotFound(err) { continue } @@ -455,7 +454,7 @@ func testAccCheckServerlessCacheDestroy(ctx context.Context) resource.TestCheckF return err } - return fmt.Errorf("ElastiCache Serverless (%s) still exists", rs.Primary.ID) + return fmt.Errorf("ElastiCache Serverless Cache (%s) still exists", rs.Primary.ID) } return nil diff --git a/internal/service/elasticache/service_package_gen.go b/internal/service/elasticache/service_package_gen.go index a677c2a0d68..d8039414f07 100644 --- a/internal/service/elasticache/service_package_gen.go +++ b/internal/service/elasticache/service_package_gen.go @@ -36,12 +36,14 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceCluster, + Factory: dataSourceCluster, TypeName: "aws_elasticache_cluster", + Name: "Cluster", }, { - Factory: DataSourceReplicationGroup, + Factory: dataSourceReplicationGroup, TypeName: "aws_elasticache_replication_group", + Name: "Replication Group", }, { Factory: dataSourceSubnetGroup, @@ -49,8 +51,9 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Name: "Subnet Group", }, { - Factory: DataSourceUser, + Factory: dataSourceUser, TypeName: "aws_elasticache_user", + Name: "User", }, } } @@ -58,7 +61,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceCluster, + Factory: resourceCluster, TypeName: "aws_elasticache_cluster", Name: "Cluster", Tags: &types.ServicePackageResourceTags{ @@ -66,11 +69,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceGlobalReplicationGroup, + Factory: resourceGlobalReplicationGroup, TypeName: "aws_elasticache_global_replication_group", + Name: "Global Replication Group", }, { - Factory: ResourceParameterGroup, + Factory: resourceParameterGroup, TypeName: "aws_elasticache_parameter_group", Name: "Parameter Group", Tags: &types.ServicePackageResourceTags{ @@ -78,7 +82,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationGroup, + Factory: resourceReplicationGroup, TypeName: "aws_elasticache_replication_group", Name: "Replication Group", Tags: &types.ServicePackageResourceTags{ @@ -94,7 +98,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceUser, + Factory: resourceUser, TypeName: "aws_elasticache_user", Name: "User", Tags: &types.ServicePackageResourceTags{ @@ -102,7 +106,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceUserGroup, + Factory: resourceUserGroup, TypeName: "aws_elasticache_user_group", Name: "User Group", Tags: &types.ServicePackageResourceTags{ @@ -110,8 +114,9 @@ func (p *servicePackage) 
SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceUserGroupAssociation, + Factory: resourceUserGroupAssociation, TypeName: "aws_elasticache_user_group_association", + Name: "User Group Association", }, } } diff --git a/internal/service/elasticache/status.go b/internal/service/elasticache/status.go deleted file mode 100644 index 5d04270f9ae..00000000000 --- a/internal/service/elasticache/status.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package elasticache - -import ( - "context" - - elasticache_v2 "github.com/aws/aws-sdk-go-v2/service/elasticache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -const ( - ReplicationGroupStatusCreating = "creating" - ReplicationGroupStatusAvailable = "available" - ReplicationGroupStatusModifying = "modifying" - ReplicationGroupStatusDeleting = "deleting" - ReplicationGroupStatusCreateFailed = "create-failed" - ReplicationGroupStatusSnapshotting = "snapshotting" -) - -// StatusReplicationGroup fetches the Replication Group and its Status -func StatusReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - rg, err := FindReplicationGroupByID(ctx, conn, replicationGroupID) - if tfresource.NotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - return rg, aws.StringValue(rg.Status), nil - } -} - -// StatusReplicationGroupMemberClusters fetches the Replication Group's Member Clusters and either "available" or the first non-"available" status. -// NOTE: This function assumes that the intended end-state is to have all member clusters in "available" status. 
-func StatusReplicationGroupMemberClusters(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - clusters, err := FindReplicationGroupMemberClustersByID(ctx, conn, replicationGroupID) - if tfresource.NotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - status := CacheClusterStatusAvailable - for _, v := range clusters { - clusterStatus := aws.StringValue(v.CacheClusterStatus) - if clusterStatus != CacheClusterStatusAvailable { - status = clusterStatus - break - } - } - return clusters, status, nil - } -} - -const ( - CacheClusterStatusAvailable = "available" - CacheClusterStatusCreating = "creating" - CacheClusterStatusDeleted = "deleted" - CacheClusterStatusDeleting = "deleting" - CacheClusterStatusIncompatibleNetwork = "incompatible-network" - CacheClusterStatusModifying = "modifying" - CacheClusterStatusRebootingClusterNodes = "rebooting cluster nodes" - CacheClusterStatusRestoreFailed = "restore-failed" - CacheClusterStatusSnapshotting = "snapshotting" -) - -// StatusCacheCluster fetches the Cache Cluster and its Status -func StatusCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - c, err := FindCacheClusterByID(ctx, conn, cacheClusterID) - if tfresource.NotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - return c, aws.StringValue(c.CacheClusterStatus), nil - } -} - -const ( - ServerlessCacheAvailable = "available" - ServerlessCacheCreating = "creating" - ServerlessCacheDeleted = "deleted" - ServerlessCacheDeleting = "deleting" - ServerlessCacheModifying = "modifying" -) - -// statusCacheCluster fetches the Cache Cluster and its Status -func statusServerlessCache(ctx context.Context, conn *elasticache_v2.Client, cacheClusterID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - c, err := FindServerlessCacheByID(ctx, conn, cacheClusterID) - if tfresource.NotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - return c, aws.StringValue(c.Status), nil - } -} - -const ( - GlobalReplicationGroupStatusAvailable = "available" - GlobalReplicationGroupStatusCreating = "creating" - GlobalReplicationGroupStatusModifying = "modifying" - GlobalReplicationGroupStatusPrimaryOnly = "primary-only" - GlobalReplicationGroupStatusDeleting = "deleting" - GlobalReplicationGroupStatusDeleted = "deleted" -) - -// statusGlobalReplicationGroup fetches the Global Replication Group and its Status -func statusGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - grg, err := FindGlobalReplicationGroupByID(ctx, conn, globalReplicationGroupID) - if tfresource.NotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - return grg, aws.StringValue(grg.Status), nil - } -} - -const ( - GlobalReplicationGroupMemberStatusAssociated = "associated" -) - -// statusGlobalReplicationGroupMember fetches a Global Replication Group Member and its Status -func statusGlobalReplicationGroupMember(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - member, err := FindGlobalReplicationGroupMemberByID(ctx, conn, 
globalReplicationGroupID, id) - if tfresource.NotFound(err) { - return nil, "", nil - } - if err != nil { - return nil, "", err - } - - return member, aws.StringValue(member.Status), nil - } -} diff --git a/internal/service/elasticache/subnet_group_data_source.go b/internal/service/elasticache/subnet_group_data_source.go index 80dd525134c..6f17798e0b8 100644 --- a/internal/service/elasticache/subnet_group_data_source.go +++ b/internal/service/elasticache/subnet_group_data_source.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) // @SDKDataSource("aws_elasticache_subnet_group", name="Subnet Group") @@ -58,7 +59,7 @@ func dataSourceSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta group, err := findCacheSubnetGroupByName(ctx, conn, name) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Subnet Group (%s): %s", name, err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Subnet Group", err)) } d.SetId(aws.StringValue(group.CacheSubnetGroupName)) diff --git a/internal/service/elasticache/sweep.go b/internal/service/elasticache/sweep.go index 3cbe5799fe2..3093e348e09 100644 --- a/internal/service/elasticache/sweep.go +++ b/internal/service/elasticache/sweep.go @@ -106,7 +106,10 @@ func sweepClusters(region string) error { log.Printf("[ERROR] Failed to delete ElastiCache Cache Cluster (%s): %s", id, err) sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error deleting ElastiCache Cache Cluster (%s): %w", id, err)) } - _, err = WaitCacheClusterDeleted(ctx, conn, id, CacheClusterDeletedTimeout) + const ( + timeout = 40 * time.Minute + ) + _, err = waitCacheClusterDeleted(ctx, conn, id, timeout) if err != nil { log.Printf("[ERROR] Failed waiting for ElastiCache Cache Cluster (%s) to be deleted: %s", id, err) sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error deleting ElastiCache Cache Cluster (%s): waiting for completion: %w", id, err)) @@ -156,10 +159,8 @@ func sweepGlobalReplicationGroups(region string) error { log.Printf("[INFO] Deleting ElastiCache Global Replication Group: %s", id) err := deleteGlobalReplicationGroup(ctx, conn, id, sweeperGlobalReplicationGroupDefaultUpdatedTimeout, globalReplicationGroupDefaultDeletedTimeout) - if err != nil { - return fmt.Errorf("deleting ElastiCache Global Replication Group (%s): %w", id, err) - } - return nil + + return err }) } @@ -241,7 +242,7 @@ func sweepReplicationGroups(region string) error { } for _, replicationGroup := range page.ReplicationGroups { - r := ResourceReplicationGroup() + r := resourceReplicationGroup() d := r.Data(nil) if replicationGroup.GlobalReplicationGroupInfo != nil { @@ -348,7 +349,7 @@ func sweepUsers(region string) error { continue } - r := ResourceUser() + r := resourceUser() d := r.Data(nil) d.SetId(id) @@ -392,7 +393,7 @@ func sweepUserGroups(region string) error { } for _, v := range page.UserGroups { - r := ResourceUserGroup() + r := resourceUserGroup() d := r.Data(nil) d.SetId(aws.StringValue(v.UserGroupId)) @@ -426,20 +427,16 @@ func DisassociateMembers(ctx context.Context, conn *elasticache.ElastiCache, glo for _, member := range globalReplicationGroup.Members { member := member - if aws.StringValue(member.Role) == GlobalReplicationGroupMemberRolePrimary { + if 
aws.StringValue(member.Role) == globalReplicationGroupMemberRolePrimary { continue } id := aws.StringValue(globalReplicationGroup.GlobalReplicationGroupId) membersGroup.Go(func() error { - if err := DisassociateReplicationGroup(ctx, conn, id, aws.StringValue(member.ReplicationGroupId), aws.StringValue(member.ReplicationGroupRegion), sweeperGlobalReplicationGroupDisassociationReadyTimeout); err != nil { - sweeperErr := fmt.Errorf( - "error disassociating ElastiCache Replication Group (%s) in %s from Global Group (%s): %w", - aws.StringValue(member.ReplicationGroupId), aws.StringValue(member.ReplicationGroupRegion), id, err, - ) - log.Printf("[ERROR] %s", sweeperErr) - return sweeperErr + if err := disassociateReplicationGroup(ctx, conn, id, aws.StringValue(member.ReplicationGroupId), aws.StringValue(member.ReplicationGroupRegion), sweeperGlobalReplicationGroupDisassociationReadyTimeout); err != nil { + log.Printf("[ERROR] %s", err) + return err } return nil }) diff --git a/internal/service/elasticache/user.go b/internal/service/elasticache/user.go index 718527378c5..0322a8629a0 100644 --- a/internal/service/elasticache/user.go +++ b/internal/service/elasticache/user.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -28,7 +29,7 @@ import ( // @SDKResource("aws_elasticache_user", name="User") // @Tags(identifierAttribute="arn") -func ResourceUser() *schema.Resource { +func resourceUser() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceUserCreate, ReadWithoutTimeout: resourceUserRead, @@ -204,13 +205,13 @@ func resourceUserRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("access_string", user.AccessString) d.Set("arn", user.ARN) if v := user.Authentication; v != nil { - authenticationMode := map[string]interface{}{ - "passwords": d.Get("authentication_mode.0.passwords"), + tfMap := map[string]interface{}{ "password_count": aws.Int64Value(v.PasswordCount), + "passwords": d.Get("authentication_mode.0.passwords"), "type": aws.StringValue(v.Type), } - if err := d.Set("authentication_mode", []interface{}{authenticationMode}); err != nil { + if err := d.Set("authentication_mode", []interface{}{tfMap}); err != nil { return sdkdiag.AppendErrorf(diags, "setting authentication_mode: %s", err) } } else { @@ -288,12 +289,40 @@ func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta interf return diags } -func FindUserByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.User, error) { +func findUserByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.User, error) { input := &elasticache.DescribeUsersInput{ UserId: aws.String(id), } - output, err := conn.DescribeUsersWithContext(ctx, input) + return findUser(ctx, conn, input, tfslices.PredicateTrue[*elasticache.User]()) +} + +func findUser(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUsersInput, filter tfslices.Predicate[*elasticache.User]) (*elasticache.User, error) { + output, err := findUsers(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return 
tfresource.AssertSinglePtrResult(output) +} + +func findUsers(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUsersInput, filter tfslices.Predicate[*elasticache.User]) ([]*elasticache.User, error) { + var output []*elasticache.User + + err := conn.DescribeUsersPagesWithContext(ctx, input, func(page *elasticache.DescribeUsersOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Users { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeUserNotFoundFault) { return nil, &retry.NotFoundError{ @@ -306,20 +335,12 @@ func FindUserByID(ctx context.Context, conn *elasticache.ElastiCache, id string) return nil, err } - if output == nil || len(output.Users) == 0 || output.Users[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output.Users); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output.Users[0], nil + return output, nil } func statusUser(ctx context.Context, conn *elasticache.ElastiCache, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindUserByID(ctx, conn, id) + output, err := findUserByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil diff --git a/internal/service/elasticache/user_data_source.go b/internal/service/elasticache/user_data_source.go index 5e0d3015743..b5955708605 100644 --- a/internal/service/elasticache/user_data_source.go +++ b/internal/service/elasticache/user_data_source.go @@ -14,8 +14,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKDataSource("aws_elasticache_user") -func DataSourceUser() *schema.Resource { +// @SDKDataSource("aws_elasticache_user", name="User") +func dataSourceUser() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceUserRead, @@ -71,29 +71,24 @@ func dataSourceUserRead(ctx context.Context, d *schema.ResourceData, meta interf var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - user, err := FindUserByID(ctx, conn, d.Get("user_id").(string)) - if tfresource.NotFound(err) { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): Not found. 
Please change your search criteria and try again: %s", d.Get("user_id").(string), err) - } + user, err := findUserByID(ctx, conn, d.Get("user_id").(string)) + if err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): %s", d.Get("user_id").(string), err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache User", err)) } d.SetId(aws.StringValue(user.UserId)) - d.Set("access_string", user.AccessString) - if v := user.Authentication; v != nil { - authenticationMode := map[string]interface{}{ + tfMap := map[string]interface{}{ "password_count": aws.Int64Value(v.PasswordCount), "type": aws.StringValue(v.Type), } - if err := d.Set("authentication_mode", []interface{}{authenticationMode}); err != nil { + if err := d.Set("authentication_mode", []interface{}{tfMap}); err != nil { return sdkdiag.AppendErrorf(diags, "setting authentication_mode: %s", err) } } - d.Set("engine", user.Engine) d.Set("user_id", user.UserId) d.Set("user_name", user.UserName) diff --git a/internal/service/elasticache/user_group.go b/internal/service/elasticache/user_group.go index 76283038037..e68f9766b42 100644 --- a/internal/service/elasticache/user_group.go +++ b/internal/service/elasticache/user_group.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -28,7 +29,7 @@ import ( // @SDKResource("aws_elasticache_user_group", name="User Group") // @Tags(identifierAttribute="arn") -func ResourceUserGroup() *schema.Resource { +func resourceUserGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceUserGroupCreate, ReadWithoutTimeout: resourceUserGroupRead, @@ -126,7 +127,7 @@ func resourceUserGroupRead(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - userGroup, err := FindUserGroupByID(ctx, conn, d.Id()) + userGroup, err := findUserGroupByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ElastiCache User Group (%s) not found, removing from state", d.Id()) @@ -206,12 +207,40 @@ func resourceUserGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func FindUserGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.UserGroup, error) { +func findUserGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.UserGroup, error) { input := &elasticache.DescribeUserGroupsInput{ UserGroupId: aws.String(id), } - output, err := conn.DescribeUserGroupsWithContext(ctx, input) + return findUserGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.UserGroup]()) +} + +func findUserGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUserGroupsInput, filter tfslices.Predicate[*elasticache.UserGroup]) (*elasticache.UserGroup, error) { + output, err := findUserGroups(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findUserGroups(ctx context.Context, conn *elasticache.ElastiCache, input 
*elasticache.DescribeUserGroupsInput, filter tfslices.Predicate[*elasticache.UserGroup]) ([]*elasticache.UserGroup, error) { + var output []*elasticache.UserGroup + + err := conn.DescribeUserGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeUserGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.UserGroups { + if v != nil && filter(v) { + output = append(output, v) + } + } + + return !lastPage + }) if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeUserGroupNotFoundFault) { return nil, &retry.NotFoundError{ @@ -224,20 +253,12 @@ func FindUserGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id st return nil, err } - if output == nil || len(output.UserGroups) == 0 || output.UserGroups[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output.UserGroups); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output.UserGroups[0], nil + return output, nil } func statusUserGroup(ctx context.Context, conn *elasticache.ElastiCache, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindUserGroupByID(ctx, conn, id) + output, err := findUserGroupByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil diff --git a/internal/service/elasticache/user_group_association.go b/internal/service/elasticache/user_group_association.go index ab8fb7ad74c..801b6af7520 100644 --- a/internal/service/elasticache/user_group_association.go +++ b/internal/service/elasticache/user_group_association.go @@ -5,9 +5,7 @@ package elasticache import ( "context" - "fmt" "log" - "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -17,12 +15,18 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_elasticache_user_group_association") -func ResourceUserGroupAssociation() *schema.Resource { +const ( + userGroupAssociationResourceIDPartCount = 2 +) + +// @SDKResource("aws_elasticache_user_group_association", name="User Group Association") +func resourceUserGroupAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceUserGroupAssociationCreate, ReadWithoutTimeout: resourceUserGroupAssociationRead, @@ -53,7 +57,7 @@ func resourceUserGroupAssociationCreate(ctx context.Context, d *schema.ResourceD userGroupID := d.Get("user_group_id").(string) userID := d.Get("user_id").(string) - id := userGroupAssociationCreateResourceID(userGroupID, userID) + id := errs.Must(flex.FlattenResourceId([]string{userGroupID, userID}, userGroupAssociationResourceIDPartCount, true)) input := &elasticache.ModifyUserGroupInput{ UserGroupId: aws.String(userGroupID), UserIdsToAdd: aws.StringSlice([]string{userID}), @@ -80,13 +84,13 @@ func resourceUserGroupAssociationRead(ctx context.Context, d *schema.ResourceDat var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - userGroupID, userID, err := UserGroupAssociationParseResourceID(d.Id()) - + parts, err := flex.ExpandResourceId(d.Id(), userGroupAssociationResourceIDPartCount, true) if err != nil { return 
sdkdiag.AppendFromErr(diags, err) } - err = FindUserGroupAssociation(ctx, conn, userGroupID, userID) + userGroupID, userID := parts[0], parts[1] + err = findUserGroupAssociationByTwoPartKey(ctx, conn, userGroupID, userID) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ElastiCache User Group Association (%s) not found, removing from state", d.Id()) @@ -108,13 +112,13 @@ func resourceUserGroupAssociationDelete(ctx context.Context, d *schema.ResourceD var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) - userGroupID, userID, err := UserGroupAssociationParseResourceID(d.Id()) - + parts, err := flex.ExpandResourceId(d.Id(), userGroupAssociationResourceIDPartCount, true) if err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[INFO] Deleting ElastiCache User Group Association: %s", d.Id()) + userGroupID, userID := parts[0], parts[1] _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Minute, func() (interface{}, error) { return conn.ModifyUserGroupWithContext(ctx, &elasticache.ModifyUserGroupInput{ UserGroupId: aws.String(userGroupID), @@ -137,8 +141,8 @@ func resourceUserGroupAssociationDelete(ctx context.Context, d *schema.ResourceD return diags } -func FindUserGroupAssociation(ctx context.Context, conn *elasticache.ElastiCache, userGroupID, userID string) error { - userGroup, err := FindUserGroupByID(ctx, conn, userGroupID) +func findUserGroupAssociationByTwoPartKey(ctx context.Context, conn *elasticache.ElastiCache, userGroupID, userID string) error { + userGroup, err := findUserGroupByID(ctx, conn, userGroupID) if err != nil { return err @@ -152,20 +156,3 @@ func FindUserGroupAssociation(ctx context.Context, conn *elasticache.ElastiCache return &retry.NotFoundError{} } - -const userGroupAssociationResourceIDSeparator = "," - -func userGroupAssociationCreateResourceID(userGroupID, userID string) string { - parts := []string{userGroupID, userID} - id := strings.Join(parts, userGroupAssociationResourceIDSeparator) - return id -} - -func UserGroupAssociationParseResourceID(id string) (string, string, error) { - parts := strings.Split(id, userGroupAssociationResourceIDSeparator) - if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return parts[0], parts[1], nil - } - - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected '%[2]s'", id, userGroupAssociationResourceIDSeparator) -} diff --git a/internal/service/elasticache/user_group_association_test.go b/internal/service/elasticache/user_group_association_test.go index 87e1556630c..c00e8780cc1 100644 --- a/internal/service/elasticache/user_group_association_test.go +++ b/internal/service/elasticache/user_group_association_test.go @@ -147,13 +147,7 @@ func testAccCheckUserGroupAssociationDestroy(ctx context.Context) resource.TestC continue } - userGroupID, userID, err := tfelasticache.UserGroupAssociationParseResourceID(rs.Primary.ID) - - if err != nil { - return err - } - - err = tfelasticache.FindUserGroupAssociation(ctx, conn, userGroupID, userID) + err := tfelasticache.FindUserGroupAssociationByTwoPartKey(ctx, conn, rs.Primary.Attributes["user_group_id"], rs.Primary.Attributes["user_id"]) if tfresource.NotFound(err) { continue @@ -177,19 +171,9 @@ func testAccCheckUserGroupAssociationExists(ctx context.Context, n string) resou return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ElastiCache User Group Association ID is set") - } - - userGroupID, userID, err := 
tfelasticache.UserGroupAssociationParseResourceID(rs.Primary.ID) - - if err != nil { - return err - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) - err = tfelasticache.FindUserGroupAssociation(ctx, conn, userGroupID, userID) + err := tfelasticache.FindUserGroupAssociationByTwoPartKey(ctx, conn, rs.Primary.Attributes["user_group_id"], rs.Primary.Attributes["user_id"]) return err } diff --git a/internal/service/elasticache/wait.go b/internal/service/elasticache/wait.go deleted file mode 100644 index 75537b46294..00000000000 --- a/internal/service/elasticache/wait.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package elasticache - -import ( - "context" - "time" - - elasticache_v2 "github.com/aws/aws-sdk-go-v2/service/elasticache" - awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -const ( - ReplicationGroupDefaultCreatedTimeout = 60 * time.Minute - ReplicationGroupDefaultUpdatedTimeout = 40 * time.Minute - ReplicationGroupDefaultDeletedTimeout = 40 * time.Minute - - replicationGroupAvailableMinTimeout = 10 * time.Second - replicationGroupAvailableCreateDelay = 30 * time.Second - replicationGroupAvailableModifyDelay = 30 * time.Second - replicationGroupAvailableReadDelay = 0 * time.Second - - replicationGroupDeletedMinTimeout = 10 * time.Second - replicationGroupDeletedDelay = 30 * time.Second -) - -// WaitReplicationGroupAvailable waits for a ReplicationGroup to return Available -func WaitReplicationGroupAvailable(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration, delay time.Duration) (*elasticache.ReplicationGroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - ReplicationGroupStatusCreating, - ReplicationGroupStatusModifying, - ReplicationGroupStatusSnapshotting, - }, - Target: []string{ReplicationGroupStatusAvailable}, - Refresh: StatusReplicationGroup(ctx, conn, replicationGroupID), - Timeout: timeout, - MinTimeout: replicationGroupAvailableMinTimeout, - Delay: delay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.ReplicationGroup); ok { - return v, err - } - return nil, err -} - -// WaitReplicationGroupDeleted waits for a ReplicationGroup to be deleted -func WaitReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - ReplicationGroupStatusCreating, - ReplicationGroupStatusAvailable, - ReplicationGroupStatusDeleting, - }, - Target: []string{}, - Refresh: StatusReplicationGroup(ctx, conn, replicationGroupID), - Timeout: timeout, - MinTimeout: replicationGroupDeletedMinTimeout, - Delay: replicationGroupDeletedDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.ReplicationGroup); ok { - return v, err - } - return nil, err -} - -// WaitReplicationGroupMemberClustersAvailable waits for all of a ReplicationGroup's Member Clusters to return Available -func WaitReplicationGroupMemberClustersAvailable(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) ([]*elasticache.CacheCluster, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - CacheClusterStatusCreating, 
- CacheClusterStatusDeleting, - CacheClusterStatusModifying, - }, - Target: []string{CacheClusterStatusAvailable}, - Refresh: StatusReplicationGroupMemberClusters(ctx, conn, replicationGroupID), - Timeout: timeout, - MinTimeout: cacheClusterAvailableMinTimeout, - Delay: cacheClusterAvailableDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.([]*elasticache.CacheCluster); ok { - return v, err - } - return nil, err -} - -const ( - CacheClusterCreatedTimeout = 40 * time.Minute - CacheClusterUpdatedTimeout = 80 * time.Minute - CacheClusterDeletedTimeout = 40 * time.Minute - - cacheClusterAvailableMinTimeout = 10 * time.Second - cacheClusterAvailableDelay = 30 * time.Second - - cacheClusterDeletedMinTimeout = 10 * time.Second - cacheClusterDeletedDelay = 30 * time.Second -) - -// waitCacheClusterAvailable waits for a Cache Cluster to return Available -func waitCacheClusterAvailable(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{ - CacheClusterStatusCreating, - CacheClusterStatusModifying, - CacheClusterStatusSnapshotting, - CacheClusterStatusRebootingClusterNodes, - }, - Target: []string{CacheClusterStatusAvailable}, - Refresh: StatusCacheCluster(ctx, conn, cacheClusterID), - Timeout: timeout, - MinTimeout: cacheClusterAvailableMinTimeout, - Delay: cacheClusterAvailableDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.CacheCluster); ok { - return v, err - } - return nil, err -} - -const ( - ServerlessCacheAvailableMinTimeout = 10 * time.Second - ServerlessCacheAvailableDelay = 30 * time.Second - - ServerlessCacheDeletedMinTimeout = 10 * time.Second - ServerlessCacheDeletedDelay = 30 * time.Second -) - -// waitServerlessCacheAvailable waits for a cache cluster to return available -func waitServerlessCacheAvailable(ctx context.Context, conn *elasticache_v2.Client, cacheClusterID string, timeout time.Duration) (awstypes.ServerlessCache, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - ServerlessCacheCreating, - ServerlessCacheDeleting, - ServerlessCacheModifying, - }, - Target: []string{ServerlessCacheAvailable}, - Refresh: statusServerlessCache(ctx, conn, cacheClusterID), - Timeout: timeout, - MinTimeout: ServerlessCacheAvailableMinTimeout, - Delay: ServerlessCacheAvailableDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(awstypes.ServerlessCache); ok { - return v, err - } - return awstypes.ServerlessCache{}, err -} - -// waitServerlessCacheDeleted waits for a cache cluster to be deleted -func waitServerlessCacheDeleted(ctx context.Context, conn *elasticache_v2.Client, cacheClusterID string, timeout time.Duration) (awstypes.ServerlessCache, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - ServerlessCacheCreating, - ServerlessCacheDeleting, - ServerlessCacheModifying, - }, - Target: []string{}, - Refresh: statusServerlessCache(ctx, conn, cacheClusterID), - Timeout: timeout, - MinTimeout: ServerlessCacheDeletedMinTimeout, - Delay: ServerlessCacheDeletedDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(awstypes.ServerlessCache); ok { - return v, err - } - return awstypes.ServerlessCache{}, err -} - -// WaitCacheClusterDeleted waits for a Cache Cluster to be deleted -func WaitCacheClusterDeleted(ctx context.Context, 
conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - CacheClusterStatusCreating, - CacheClusterStatusAvailable, - CacheClusterStatusModifying, - CacheClusterStatusDeleting, - CacheClusterStatusIncompatibleNetwork, - CacheClusterStatusRestoreFailed, - CacheClusterStatusSnapshotting, - }, - Target: []string{}, - Refresh: StatusCacheCluster(ctx, conn, cacheClusterID), - Timeout: timeout, - MinTimeout: cacheClusterDeletedMinTimeout, - Delay: cacheClusterDeletedDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.CacheCluster); ok { - return v, err - } - return nil, err -} - -const ( - globalReplicationGroupDefaultCreatedTimeout = 60 * time.Minute - globalReplicationGroupDefaultUpdatedTimeout = 60 * time.Minute - globalReplicationGroupDefaultDeletedTimeout = 20 * time.Minute - - globalReplicationGroupAvailableMinTimeout = 10 * time.Second - globalReplicationGroupAvailableDelay = 30 * time.Second - - globalReplicationGroupDeletedMinTimeout = 10 * time.Second - globalReplicationGroupDeletedDelay = 30 * time.Second -) - -// waitGlobalReplicationGroupAvailable waits for a Global Replication Group to be available, -// with status either "available" or "primary-only" -func waitGlobalReplicationGroupAvailable(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{GlobalReplicationGroupStatusCreating, GlobalReplicationGroupStatusModifying}, - Target: []string{GlobalReplicationGroupStatusAvailable, GlobalReplicationGroupStatusPrimaryOnly}, - Refresh: statusGlobalReplicationGroup(ctx, conn, globalReplicationGroupID), - Timeout: timeout, - MinTimeout: globalReplicationGroupAvailableMinTimeout, - Delay: globalReplicationGroupAvailableDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.GlobalReplicationGroup); ok { - return v, err - } - return nil, err -} - -// waitGlobalReplicationGroupDeleted waits for a Global Replication Group to be deleted -func waitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - GlobalReplicationGroupStatusAvailable, - GlobalReplicationGroupStatusPrimaryOnly, - GlobalReplicationGroupStatusModifying, - GlobalReplicationGroupStatusDeleting, - }, - Target: []string{}, - Refresh: statusGlobalReplicationGroup(ctx, conn, globalReplicationGroupID), - Timeout: timeout, - MinTimeout: globalReplicationGroupDeletedMinTimeout, - Delay: globalReplicationGroupDeletedDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.GlobalReplicationGroup); ok { - return v, err - } - return nil, err -} - -const ( - // GlobalReplicationGroupDisassociationReadyTimeout specifies how long to wait for a global replication group - // to be in a valid state before disassociating - GlobalReplicationGroupDisassociationReadyTimeout = 45 * time.Minute - - // globalReplicationGroupDisassociationTimeout specifies how long to wait for the actual disassociation - globalReplicationGroupDisassociationTimeout = 20 * time.Minute - - globalReplicationGroupDisassociationMinTimeout = 10 * time.Second - 
globalReplicationGroupDisassociationDelay = 30 * time.Second -) - -func waitGlobalReplicationGroupMemberDetached(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, id string) (*elasticache.GlobalReplicationGroupMember, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - GlobalReplicationGroupMemberStatusAssociated, - }, - Target: []string{}, - Refresh: statusGlobalReplicationGroupMember(ctx, conn, globalReplicationGroupID, id), - Timeout: globalReplicationGroupDisassociationTimeout, - MinTimeout: globalReplicationGroupDisassociationMinTimeout, - Delay: globalReplicationGroupDisassociationDelay, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*elasticache.GlobalReplicationGroupMember); ok { - return v, err - } - return nil, err -} diff --git a/internal/slices/predicates.go b/internal/slices/predicates.go index b184e6237d7..212a5f76a89 100644 --- a/internal/slices/predicates.go +++ b/internal/slices/predicates.go @@ -3,6 +3,19 @@ package slices +// PredicateAnd returns a Predicate that evaluates to true if all of the specified predicates evaluate to true. +func PredicateAnd[T any](predicates ...Predicate[T]) Predicate[T] { + return func(v T) bool { + for _, predicate := range predicates { + if !predicate(v) { + return false + } + } + + return true + } +} + // PredicateEquals returns a Predicate that evaluates to true if the predicate's argument equals `v`. func PredicateEquals[T comparable](v T) Predicate[T] { return func(x T) bool { diff --git a/internal/slices/predicates_test.go b/internal/slices/predicates_test.go new file mode 100644 index 00000000000..9821e3db5e8 --- /dev/null +++ b/internal/slices/predicates_test.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package slices + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestPredicateAnd(t *testing.T) { + t.Parallel() + + type testCase struct { + input int + predicates []Predicate[int] + expected bool + } + tests := map[string]testCase{ + "all true": { + input: 7, + predicates: []Predicate[int]{ + PredicateEquals(7), + PredicateTrue[int](), + }, + expected: true, + }, + "one false": { + input: 7, + predicates: []Predicate[int]{ + PredicateTrue[int](), + PredicateEquals(7), + PredicateEquals(6), + }, + expected: false, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := PredicateAnd(test.predicates...)(test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 87b71649da2..0f2ed4d3c8d 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -273,7 +273,7 @@ This resource exports the following attributes in addition to the arguments abov [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): * `create` - (Default `60m`) -* `delete` - (Default `40m`) +* `delete` - (Default `45m`) * `update` - (Default `40m`) ## Import
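
---

Notes on the patterns in this diff, with small self-contained sketches.

The new `findUser`/`findUsers` pair (and the matching user group finders) replace a one-shot `Describe*` call with a paginated walk plus a predicate filter, delegating the exactly-one check to `tfresource.AssertSinglePtrResult`. A minimal sketch of that contract follows; the helper names (`findAll`, `findOne`) and error values are illustrative, not the provider's actual helpers, and the "pages" are pre-fetched slices standing in for the SDK's `DescribeUsersPagesWithContext` callback.

```go
package main

import (
	"errors"
	"fmt"
)

// Predicate mirrors the tfslices.Predicate type the new finders accept.
type Predicate[T any] func(T) bool

var (
	errEmptyResult    = errors.New("empty result")
	errTooManyResults = errors.New("too many results")
)

// findAll walks the pages the way a Pages-style SDK call feeds its callback,
// keeping only items that satisfy the filter.
func findAll[T any](pages [][]T, filter Predicate[T]) []T {
	var output []T
	for _, page := range pages {
		for _, v := range page {
			if filter(v) {
				output = append(output, v)
			}
		}
	}
	return output
}

// findOne applies the same contract as tfresource.AssertSinglePtrResult:
// exactly one match, otherwise an error.
func findOne[T any](pages [][]T, filter Predicate[T]) (T, error) {
	var zero T
	output := findAll(pages, filter)
	switch len(output) {
	case 0:
		return zero, errEmptyResult
	case 1:
		return output[0], nil
	default:
		return zero, errTooManyResults
	}
}

func main() {
	pages := [][]string{{"user-1", "user-2"}, {"user-3"}}

	v, err := findOne(pages, func(s string) bool { return s == "user-2" })
	fmt.Println(v, err) // user-2 <nil>

	_, err = findOne(pages, func(string) bool { return true })
	fmt.Println(err) // too many results
}
```

This shape lets `findUserByID` stay a thin wrapper (`PredicateTrue` as the filter, ID in the input) while other callers can reuse `findUsers` with a narrower predicate.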
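`PredicateAnd` composes such filters. The snippet below reproduces the function as added in `internal/slices/predicates.go`, together with a small usage example; the `isPositive`/`isEven` predicates are invented for illustration.

```go
package main

import "fmt"

type Predicate[T any] func(T) bool

// PredicateAnd returns a Predicate that evaluates to true only if all of the
// specified predicates evaluate to true (and true for zero predicates).
func PredicateAnd[T any](predicates ...Predicate[T]) Predicate[T] {
	return func(v T) bool {
		for _, predicate := range predicates {
			if !predicate(v) {
				return false
			}
		}
		return true
	}
}

func main() {
	isPositive := Predicate[int](func(n int) bool { return n > 0 })
	isEven := Predicate[int](func(n int) bool { return n%2 == 0 })

	both := PredicateAnd(isPositive, isEven)
	fmt.Println(both(4), both(3), both(-2)) // true false false
}
```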
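The user group association resource drops its hand-rolled `userGroupAssociationCreateResourceID`/`UserGroupAssociationParseResourceID` helpers in favor of the shared `flex.FlattenResourceId`/`flex.ExpandResourceId` pair with `userGroupAssociationResourceIDPartCount = 2`. A standalone sketch of the same join/split-and-validate contract, keeping the comma separator the old helpers used; the function names here are illustrative, not the `flex` package's actual signatures.

```go
package main

import (
	"fmt"
	"strings"
)

const resourceIDSeparator = ","

// flattenResourceID joins ID parts after validating the count, producing the
// user_group_id,user_id format the association resource stores as its ID.
func flattenResourceID(parts []string, partCount int) (string, error) {
	if len(parts) != partCount {
		return "", fmt.Errorf("unexpected number of ID parts: got %d, expected %d", len(parts), partCount)
	}
	return strings.Join(parts, resourceIDSeparator), nil
}

// expandResourceID is the inverse: split the ID and validate the part count.
func expandResourceID(id string, partCount int) ([]string, error) {
	parts := strings.Split(id, resourceIDSeparator)
	if len(parts) != partCount {
		return nil, fmt.Errorf("unexpected format for ID (%s): expected %d parts separated by %q", id, partCount, resourceIDSeparator)
	}
	return parts, nil
}

func main() {
	id, _ := flattenResourceID([]string{"my-user-group", "my-user"}, 2)
	fmt.Println(id) // my-user-group,my-user

	parts, _ := expandResourceID(id, 2)
	fmt.Println(parts[0], parts[1]) // my-user-group my-user
}
```

Centralizing the format also means the acceptance tests no longer need to parse the ID at all; they read `user_group_id` and `user_id` straight from `rs.Primary.Attributes`, as the updated test file shows.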
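Finally, `wait.go` is deleted outright. For context, the waiters it defined (`waitCacheClusterAvailable`, `WaitReplicationGroupDeleted`, and the rest) are all instances of one poll-until-target loop: refresh the status, succeed on a `Target` state, keep waiting on a `Pending` state, and fail fast on anything else, which is exactly how a state missing from `Pending` (such as `snapshotting`) surfaces as an `unexpected state` error. The sketch below shows that underlying pattern under stated assumptions: `waitForState` and the state names are invented for illustration and are not the plugin SDK's `retry.StateChangeConf` API.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForState polls refresh until it reports the target state, tolerating
// only the listed pending states in the meantime.
func waitForState(ctx context.Context, refresh func() (string, error), pending map[string]bool, target string, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		state, err := refresh()
		if err != nil {
			return err
		}
		if state == target {
			return nil
		}
		// Any state outside Pending and Target is a hard failure, mirroring
		// the "unexpected state" errors a StateChangeConf-style waiter raises.
		if !pending[state] {
			return fmt.Errorf("unexpected state %q, wanted %q", state, target)
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}

func main() {
	// Simulated status transitions for a resource being created.
	states := []string{"creating", "modifying", "available"}
	i := 0
	refresh := func() (string, error) {
		if i >= len(states) {
			return "", errors.New("refresh exhausted")
		}
		s := states[i]
		i++
		return s, nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	err := waitForState(ctx, refresh, map[string]bool{"creating": true, "modifying": true}, "available", 10*time.Millisecond)
	fmt.Println(err) // <nil>
}
```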