diff --git a/.changelog/33790.txt b/.changelog/33790.txt new file mode 100644 index 00000000000..dec831ae4c6 --- /dev/null +++ b/.changelog/33790.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_docdb_cluster: Add `allow_major_version_upgrade` argument +``` diff --git a/internal/service/docdb/cluster.go b/internal/service/docdb/cluster.go index a0bebacac0b..ab985f5b77e 100644 --- a/internal/service/docdb/cluster.go +++ b/internal/service/docdb/cluster.go @@ -36,8 +36,15 @@ func ResourceCluster() *schema.Resource { ReadWithoutTimeout: resourceClusterRead, UpdateWithoutTimeout: resourceClusterUpdate, DeleteWithoutTimeout: resourceClusterDelete, + Importer: &schema.ResourceImporter{ - StateContext: resourceClusterImport, + StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil + }, }, Timeouts: &schema.ResourceTimeout{ @@ -47,20 +54,31 @@ func ResourceCluster() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "allow_major_version_upgrade": { + Type: schema.TypeBool, + Optional: true, + }, + "apply_immediately": { + Type: schema.TypeBool, + Optional: true, + }, "arn": { Type: schema.TypeString, Computed: true, }, - "availability_zones": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, - ForceNew: true, Computed: true, - Set: schema.HashString, + ForceNew: true, + }, + "backup_retention_period": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ValidateFunc: validation.IntAtMost(35), }, - "cluster_identifier": { Type: schema.TypeString, Optional: true, @@ -77,69 +95,58 @@ func ResourceCluster() *schema.Resource { ConflictsWith: []string{"cluster_identifier"}, ValidateFunc: validIdentifierPrefix, }, - "cluster_members": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, Computed: true, - Set: schema.HashString, }, - - "db_subnet_group_name": { + "cluster_resource_id": { Type: schema.TypeString, - Optional: true, - ForceNew: true, Computed: true, }, - "db_cluster_parameter_group_name": { Type: schema.TypeString, Optional: true, Computed: true, }, - - "endpoint": { + "db_subnet_group_name": { Type: schema.TypeString, + Optional: true, Computed: true, + ForceNew: true, }, - - "global_cluster_identifier": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validGlobalCusterIdentifier, + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, }, - - "reader_endpoint": { - Type: schema.TypeString, - Computed: true, + "enabled_cloudwatch_logs_exports": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + "audit", + "profiler", + }, false), + }, }, - - "hosted_zone_id": { + "endpoint": { Type: schema.TypeString, Computed: true, }, - "engine": { Type: schema.TypeString, Optional: true, - Default: "docdb", ForceNew: true, - ValidateFunc: validEngine(), + Default: engineDocDB, + ValidateFunc: validation.StringInSlice(engine_Values(), false), }, - "engine_version": { Type: schema.TypeString, Optional: true, Computed: true, }, - - "storage_encrypted": { - Type: schema.TypeBool, - Optional: true, - ForceNew: true, - }, - 
"final_snapshot_identifier": { Type: schema.TypeString, Optional: true, @@ -158,65 +165,46 @@ func ResourceCluster() *schema.Resource { return }, }, - - "skip_final_snapshot": { - Type: schema.TypeBool, - Optional: true, - Default: false, + "global_cluster_identifier": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validGlobalCusterIdentifier, }, - - "master_username": { + "hosted_zone_id": { Type: schema.TypeString, Computed: true, - Optional: true, - ForceNew: true, }, - + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, "master_password": { Type: schema.TypeString, Optional: true, Sensitive: true, }, - - "snapshot_identifier": { + "master_username": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // allow snapshot_idenfitier to be removed without forcing re-creation - return new == "" - }, }, - "port": { Type: schema.TypeInt, Optional: true, - Default: 27017, ForceNew: true, + Default: 27017, ValidateFunc: validation.IntBetween(1150, 65535), }, - - "apply_immediately": { - Type: schema.TypeBool, - Optional: true, - Computed: true, - }, - - "vpc_security_group_ids": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "preferred_backup_window": { Type: schema.TypeString, Optional: true, Computed: true, ValidateFunc: verify.ValidOnceADayWindowFormat, }, - "preferred_maintenance_window": { Type: schema.TypeString, Optional: true, @@ -229,61 +217,43 @@ func ResourceCluster() *schema.Resource { }, ValidateFunc: verify.ValidOnceAWeekWindowFormat, }, - - "backup_retention_period": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - ValidateFunc: validation.IntAtMost(35), - }, - - "kms_key_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: verify.ValidARN, - }, - - "cluster_resource_id": { + "reader_endpoint": { Type: schema.TypeString, Computed: true, }, - - "enabled_cloudwatch_logs_exports": { - Type: schema.TypeList, + "skip_final_snapshot": { + Type: schema.TypeBool, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{ - "audit", - "profiler", - }, false), + Default: false, + }, + "snapshot_identifier": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // allow snapshot_idenfitier to be removed without forcing re-creation + return new == "" }, }, - - "deletion_protection": { + "storage_encrypted": { Type: schema.TypeBool, Optional: true, + ForceNew: true, }, - names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "vpc_security_group_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, CustomizeDiff: verify.SetTagsDiff, } } -func resourceClusterImport(ctx context.Context, - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched - // from any API call, so we need to default skip_final_snapshot to true so - // that final_snapshot_identifier is not required - d.Set("skip_final_snapshot", true) - return []*schema.ResourceData{d}, nil -} - func resourceClusterCreate(ctx context.Context, 
d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DocDBConn(ctx) @@ -308,77 +278,68 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if _, ok := d.GetOk("snapshot_identifier"); ok { - opts := docdb.RestoreDBClusterFromSnapshotInput{ + input := &docdb.RestoreDBClusterFromSnapshotInput{ DBClusterIdentifier: aws.String(identifier), + DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), Engine: aws.String(d.Get("engine").(string)), SnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), - DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), Tags: getTagsIn(ctx), } - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - opts.AvailabilityZones = flex.ExpandStringSet(attr) + if v := d.Get("availability_zones").(*schema.Set); v.Len() > 0 { + input.AvailabilityZones = flex.ExpandStringSet(v) } - if attr, ok := d.GetOk("backup_retention_period"); ok { - modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("backup_retention_period"); ok { + modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(v.(int))) requiresModifyDbCluster = true } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - opts.DBSubnetGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + modifyDbClusterInput.DBClusterParameterGroupName = aws.String(v.(string)) + requiresModifyDbCluster = true } - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - modifyDbClusterInput.DBClusterParameterGroupName = aws.String(attr.(string)) - requiresModifyDbCluster = true + if v, ok := d.GetOk("db_subnet_group_name"); ok { + input.DBSubnetGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(attr.([]interface{})) > 0 { - opts.EnableCloudwatchLogsExports = flex.ExpandStringList(attr.([]interface{})) + if v, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(v.([]interface{})) > 0 { + input.EnableCloudwatchLogsExports = flex.ExpandStringList(v.([]interface{})) } - if attr, ok := d.GetOk("engine_version"); ok { - opts.EngineVersion = aws.String(attr.(string)) + if v, ok := d.GetOk("engine_version"); ok { + input.EngineVersion = aws.String(v.(string)) } - if attr, ok := d.GetOk("kms_key_id"); ok { - opts.KmsKeyId = aws.String(attr.(string)) + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) } - if attr, ok := d.GetOk("port"); ok { - opts.Port = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("preferred_backup_window"); ok { - modifyDbClusterInput.PreferredBackupWindow = aws.String(attr.(string)) + if v, ok := d.GetOk("preferred_backup_window"); ok { + modifyDbClusterInput.PreferredBackupWindow = aws.String(v.(string)) requiresModifyDbCluster = true } - if attr, ok := d.GetOk("preferred_maintenance_window"); ok { - modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(attr.(string)) + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(v.(string)) requiresModifyDbCluster = true } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - opts.VpcSecurityGroupIds = flex.ExpandStringSet(attr) + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + input.VpcSecurityGroupIds = 
flex.ExpandStringSet(v) } - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { - _, err := conn.RestoreDBClusterFromSnapshotWithContext(ctx, &opts) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { - return retry.RetryableError(err) - } - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.RestoreDBClusterFromSnapshotWithContext(ctx, &opts) - } + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { + return conn.RestoreDBClusterFromSnapshotWithContext(ctx, input) + }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating DocumentDB Cluster: %s", err) + return sdkdiag.AppendErrorf(diags, "creating DocumentDB Cluster (restore from snapshot) (%s): %s", identifier, err) } } else { // Secondary DocDB clusters part of a global cluster will not supply the master_password @@ -395,121 +356,93 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } } - createOpts := &docdb.CreateDBClusterInput{ + input := &docdb.CreateDBClusterInput{ DBClusterIdentifier: aws.String(identifier), + DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), Engine: aws.String(d.Get("engine").(string)), - MasterUserPassword: aws.String(d.Get("master_password").(string)), MasterUsername: aws.String(d.Get("master_username").(string)), - DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), + MasterUserPassword: aws.String(d.Get("master_password").(string)), Tags: getTagsIn(ctx), } - if attr, ok := d.GetOk("global_cluster_identifier"); ok { - createOpts.GlobalClusterIdentifier = aws.String(attr.(string)) + if v := d.Get("availability_zones").(*schema.Set); v.Len() > 0 { + input.AvailabilityZones = flex.ExpandStringSet(v) } - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("backup_retention_period"); ok { + input.BackupRetentionPeriod = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + input.DBClusterParameterGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_subnet_group_name"); ok { + input.DBSubnetGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("engine_version"); ok { - createOpts.EngineVersion = aws.String(attr.(string)) + if v, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(v.([]interface{})) > 0 { + input.EnableCloudwatchLogsExports = flex.ExpandStringList(v.([]interface{})) } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("engine_version"); ok { + input.EngineVersion = aws.String(v.(string)) } - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("global_cluster_identifier"); ok { + input.GlobalClusterIdentifier = aws.String(v.(string)) } - if v, ok := d.GetOk("backup_retention_period"); ok { - 
createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("preferred_backup_window"); ok { - createOpts.PreferredBackupWindow = aws.String(v.(string)) + input.PreferredBackupWindow = aws.String(v.(string)) } if v, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + input.PreferredMaintenanceWindow = aws.String(v.(string)) } - if attr, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(attr.(string)) + if v, ok := d.GetOkExists("storage_encrypted"); ok { + input.StorageEncrypted = aws.Bool(v.(bool)) } - if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(attr.([]interface{})) > 0 { - createOpts.EnableCloudwatchLogsExports = flex.ExpandStringList(attr.([]interface{})) + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v) } - if attr, ok := d.GetOkExists("storage_encrypted"); ok { - createOpts.StorageEncrypted = aws.Bool(attr.(bool)) - } + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { + return conn.CreateDBClusterWithContext(ctx, input) + }, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") - err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { - var err error - _, err = conn.CreateDBClusterWithContext(ctx, createOpts) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { - return retry.RetryableError(err) - } - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.CreateDBClusterWithContext(ctx, createOpts) - } if err != nil { - return sdkdiag.AppendErrorf(diags, "creating DocumentDB cluster: %s", err) + return sdkdiag.AppendErrorf(diags, "creating DocumentDB Cluster (%s): %s", identifier, err) } } d.SetId(identifier) - log.Printf("[INFO] DocumentDB Cluster ID: %s", d.Id()) - - log.Println( - "[INFO] Waiting for DocumentDB Cluster to be available") - - stateConf := &retry.StateChangeConf{ - Pending: resourceClusterCreatePendingStates, - Target: []string{"available"}, - Refresh: resourceClusterStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster state to be \"available\": %s", err) + if _, err := waitDBClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster (%s) create: %s", d.Id(), err) } if requiresModifyDbCluster { modifyDbClusterInput.DBClusterIdentifier = aws.String(d.Id()) - log.Printf("[INFO] DocumentDB Cluster (%s) configuration requires ModifyDBCluster: %s", d.Id(), modifyDbClusterInput) _, err := conn.ModifyDBClusterWithContext(ctx, modifyDbClusterInput) + if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DocumentDB Cluster (%s): %s", d.Id(), err) } - log.Printf("[INFO] Waiting for DocumentDB Cluster (%s) to be available", d.Id()) - err = waitForClusterUpdate(ctx, conn, 
d.Id(), d.Timeout(schema.TimeoutCreate)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster (%s) to be available: %s", d.Id(), err) + if _, err := waitDBClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster (%s) update: %s", d.Id(), err) } } @@ -520,46 +453,24 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DocDBConn(ctx) - input := &docdb.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(d.Id()), - } - - resp, err := conn.DescribeDBClustersWithContext(ctx, input) + dbc, err := FindDBClusterByID(ctx, conn, d.Id()) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DocumentDB Cluster (%s) not found, removing from state", d.Id()) d.SetId("") - return diags + return nil } if err != nil { - return sdkdiag.AppendErrorf(diags, "describing DocumentDB Cluster (%s): %s", d.Id(), err) - } - - if resp == nil { - return sdkdiag.AppendErrorf(diags, "retrieving DocumentDB cluster: empty response for: %s", input) - } - - var dbc *docdb.DBCluster - for _, c := range resp.DBClusters { - if aws.StringValue(c.DBClusterIdentifier) == d.Id() { - dbc = c - break - } - } - - if !d.IsNewResource() && dbc == nil { - log.Printf("[WARN] DocumentDB Cluster (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags + return sdkdiag.AppendErrorf(diags, "reading DocumentDB Cluster (%s): %s", d.Id(), err) } globalCluster, err := findGlobalClusterByARN(ctx, conn, aws.StringValue(dbc.DBClusterArn)) // Ignore the following API error for regions/partitions that do not support DocDB Global Clusters: // InvalidParameterValue: Access Denied to API Version: APIGlobalDatabases - if err != nil && !tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Access Denied to API Version: APIGlobalDatabases") { - return sdkdiag.AppendErrorf(diags, "reading DocumentDB Global Cluster information for DB Cluster (%s): %s", d.Id(), err) + if err != nil && !tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "Access Denied to API Version: APIGlobalDatabases") { + return sdkdiag.AppendErrorf(diags, "reading DocumentDB Cluster (%s) Global Cluster information: %s", d.Id(), err) } if globalCluster != nil { @@ -568,35 +479,24 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("global_cluster_identifier", "") } - if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting availability_zones: %s", err) - } - d.Set("arn", dbc.DBClusterArn) + d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)) d.Set("backup_retention_period", dbc.BackupRetentionPeriod) d.Set("cluster_identifier", dbc.DBClusterIdentifier) - var cm []string for _, m := range dbc.DBClusterMembers { cm = append(cm, aws.StringValue(m.DBInstanceIdentifier)) } - if err := d.Set("cluster_members", cm); err != nil { - return sdkdiag.AppendErrorf(diags, "setting cluster_members: %s", err) - } - + d.Set("cluster_members", cm) d.Set("cluster_resource_id", dbc.DbClusterResourceId) d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup) d.Set("db_subnet_group_name", dbc.DBSubnetGroup) - - if err := d.Set("enabled_cloudwatch_logs_exports", 
aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting enabled_cloudwatch_logs_exports: %s", err) - } - + d.Set("deletion_protection", dbc.DeletionProtection) + d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)) d.Set("endpoint", dbc.Endpoint) d.Set("engine_version", dbc.EngineVersion) d.Set("engine", dbc.Engine) d.Set("hosted_zone_id", dbc.HostedZoneId) - d.Set("kms_key_id", dbc.KmsKeyId) d.Set("master_username", dbc.MasterUsername) d.Set("port", dbc.Port) @@ -604,15 +504,11 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow) d.Set("reader_endpoint", dbc.ReaderEndpoint) d.Set("storage_encrypted", dbc.StorageEncrypted) - d.Set("deletion_protection", dbc.DeletionProtection) - var vpcg []string for _, g := range dbc.VpcSecurityGroups { vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId)) } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return sdkdiag.AppendErrorf(diags, "setting vpc_security_group_ids: %s", err) - } + d.Set("vpc_security_group_ids", vpcg) return diags } @@ -620,66 +516,88 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DocDBConn(ctx) - requestUpdate := false - req := &docdb.ModifyDBClusterInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - DBClusterIdentifier: aws.String(d.Id()), - } + if d.HasChangesExcept("tags", "tags_all", "global_cluster_identifier", "skip_final_snapshot") { + input := &docdb.ModifyDBClusterInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + DBClusterIdentifier: aws.String(d.Id()), + } - if d.HasChange("master_password") { - req.MasterUserPassword = aws.String(d.Get("master_password").(string)) - requestUpdate = true - } + if v, ok := d.GetOk("allow_major_version_upgrade"); ok { + input.AllowMajorVersionUpgrade = aws.Bool(v.(bool)) + } - if d.HasChange("engine_version") { - req.EngineVersion = aws.String(d.Get("engine_version").(string)) - requestUpdate = true - } + if d.HasChange("backup_retention_period") { + input.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) + } - if d.HasChange("vpc_security_group_ids") { - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = flex.ExpandStringSet(attr) - } else { - req.VpcSecurityGroupIds = []*string{} + if d.HasChange("db_cluster_parameter_group_name") { + input.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string)) } - requestUpdate = true - } - if d.HasChange("preferred_backup_window") { - req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) - requestUpdate = true - } + if d.HasChange("deletion_protection") { + input.DeletionProtection = aws.Bool(d.Get("deletion_protection").(bool)) + } - if d.HasChange("preferred_maintenance_window") { - req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) - requestUpdate = true - } + if d.HasChange("enabled_cloudwatch_logs_exports") { + input.CloudwatchLogsExportConfiguration = expandCloudwatchLogsExportConfiguration(d) + } - if d.HasChange("backup_retention_period") { - req.BackupRetentionPeriod = 
aws.Int64(int64(d.Get("backup_retention_period").(int))) - requestUpdate = true - } + if d.HasChange("engine_version") { + input.EngineVersion = aws.String(d.Get("engine_version").(string)) + } - if d.HasChange("db_cluster_parameter_group_name") { - req.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string)) - requestUpdate = true - } + if d.HasChange("master_password") { + input.MasterUserPassword = aws.String(d.Get("master_password").(string)) + } - if d.HasChange("enabled_cloudwatch_logs_exports") { - req.CloudwatchLogsExportConfiguration = buildCloudWatchLogsExportConfiguration(d) - requestUpdate = true - } + if d.HasChange("preferred_backup_window") { + input.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) + } - if d.HasChange("deletion_protection") { - req.DeletionProtection = aws.Bool(d.Get("deletion_protection").(bool)) - requestUpdate = true + if d.HasChange("preferred_maintenance_window") { + input.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) + } + + if d.HasChange("vpc_security_group_ids") { + if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v) + } else { + input.VpcSecurityGroupIds = aws.StringSlice([]string{}) + } + } + + _, err := tfresource.RetryWhen(ctx, 5*time.Minute, + func() (interface{}, error) { + return conn.ModifyDBClusterWithContext(ctx, input) + }, + func(err error) (bool, error) { + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") { + return true, err + } + if tfawserr.ErrMessageContains(err, docdb.ErrCodeInvalidDBClusterStateFault, "is not currently in the available state") { + return true, err + } + if tfawserr.ErrMessageContains(err, docdb.ErrCodeInvalidDBClusterStateFault, "cluster is a part of a global cluster") { + return true, err + } + + return false, err + }, + ) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "modifying DocumentDB Cluster (%s): %s", d.Id(), err) + } + + if _, err := waitDBClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster (%s) update: %s", d.Id(), err) + } } if d.HasChange("global_cluster_identifier") { oRaw, nRaw := d.GetChange("global_cluster_identifier") - o := oRaw.(string) - n := nRaw.(string) + o, n := oRaw.(string), nRaw.(string) if o == "" { return sdkdiag.AppendErrorf(diags, "existing DocumentDB Clusters cannot be added to an existing DocumentDB Global Cluster") @@ -696,52 +614,19 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int _, err := conn.RemoveFromGlobalClusterWithContext(ctx, input) - if err != nil && !tfawserr.ErrCodeEquals(err, docdb.ErrCodeGlobalClusterNotFoundFault) && !tfawserr.ErrMessageContains(err, "InvalidParameterValue", "is not found in global cluster") { + if err != nil && !tfawserr.ErrCodeEquals(err, docdb.ErrCodeGlobalClusterNotFoundFault) && !tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "is not found in global cluster") { return sdkdiag.AppendErrorf(diags, "removing DocumentDB Cluster (%s) from DocumentDB Global Cluster: %s", d.Id(), err) } } - if requestUpdate { - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { - _, err := conn.ModifyDBClusterWithContext(ctx, req) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role 
ARN value is invalid or does not include the required permissions") { - return retry.RetryableError(err) - } - - if tfawserr.ErrMessageContains(err, docdb.ErrCodeInvalidDBClusterStateFault, "is not currently in the available state") { - return retry.RetryableError(err) - } - - if tfawserr.ErrMessageContains(err, docdb.ErrCodeInvalidDBClusterStateFault, "DB cluster is not available for modification") { - return retry.RetryableError(err) - } - - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.ModifyDBClusterWithContext(ctx, req) - } - if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying DocumentDB Cluster (%s): %s", d.Id(), err) - } - - log.Printf("[INFO] Waiting for DocumentDB Cluster (%s) to be available", d.Id()) - err = waitForClusterUpdate(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster (%s) to be available: %s", d.Id(), err) - } - } - return append(diags, resourceClusterRead(ctx, d, meta)...) } func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DocDBConn(ctx) - log.Printf("[DEBUG] Destroying DocumentDB Cluster (%s)", d.Id()) + + log.Printf("[DEBUG] Deleting DocumentDB Cluster: %s", d.Id()) // Automatically remove from global cluster to bypass this error on deletion: // InvalidDBClusterStateFault: This cluster is a part of a global cluster, please remove it from globalcluster first @@ -753,165 +638,236 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int _, err := conn.RemoveFromGlobalClusterWithContext(ctx, input) - if err != nil && !tfawserr.ErrCodeEquals(err, docdb.ErrCodeGlobalClusterNotFoundFault) && !tfawserr.ErrMessageContains(err, "InvalidParameterValue", "is not found in global cluster") { - return sdkdiag.AppendErrorf(diags, "removing DocumentDB Cluster (%s) from DocumentDB Global Cluster: %s", d.Id(), err) + if err != nil && !tfawserr.ErrCodeEquals(err, docdb.ErrCodeGlobalClusterNotFoundFault) && !tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "is not found in global cluster") { + return sdkdiag.AppendErrorf(diags, "removing DocumentDB Cluster (%s) from Global Cluster: %s", d.Id(), err) } } - deleteOpts := docdb.DeleteDBClusterInput{ + input := &docdb.DeleteDBClusterInput{ DBClusterIdentifier: aws.String(d.Id()), } skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - deleteOpts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) + input.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) if !skipFinalSnapshot { - if name, present := d.GetOk("final_snapshot_identifier"); present { - deleteOpts.FinalDBSnapshotIdentifier = aws.String(name.(string)) + if v, ok := d.GetOk("final_snapshot_identifier"); ok { + input.FinalDBSnapshotIdentifier = aws.String(v.(string)) } else { return sdkdiag.AppendErrorf(diags, "DocumentDB Cluster FinalSnapshotIdentifier is required when a final snapshot is required") } } - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteDBClusterWithContext(ctx, &deleteOpts) - if err != nil { + _, err := tfresource.RetryWhen(ctx, 5*time.Minute, + func() (interface{}, error) { + return conn.DeleteDBClusterWithContext(ctx, input) + }, + func(err error) (bool, error) { if tfawserr.ErrMessageContains(err, docdb.ErrCodeInvalidDBClusterStateFault, "is not currently in the available state") { - return 
retry.RetryableError(err) + return true, err } if tfawserr.ErrMessageContains(err, docdb.ErrCodeInvalidDBClusterStateFault, "cluster is a part of a global cluster") { - return retry.RetryableError(err) - } - if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { - return nil + return true, err } - return retry.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteDBClusterWithContext(ctx, &deleteOpts) - } - if err != nil { - return sdkdiag.AppendErrorf(diags, "DocumentDB Cluster cannot be deleted: %s", err) - } - stateConf := &retry.StateChangeConf{ - Pending: resourceClusterDeletePendingStates, - Target: []string{"destroyed"}, - Refresh: resourceClusterStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, + return false, err + }, + ) + + if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { + return diags } - // Wait, catching any errors - _, err = stateConf.WaitForStateContext(ctx) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting DocumentDB Cluster (%s): %s", d.Id(), err) } + if _, err := waitDBClusterDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DocumentDB Cluster (%s) delete: %s", d.Id(), err) + } + return diags } -func resourceClusterStateRefreshFunc(ctx context.Context, conn *docdb.DocDB, dbClusterIdentifier string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeDBClustersWithContext(ctx, &docdb.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(dbClusterIdentifier), - }) +func expandCloudwatchLogsExportConfiguration(d *schema.ResourceData) *docdb.CloudwatchLogsExportConfiguration { // nosemgrep:ci.caps0-in-func-name + oraw, nraw := d.GetChange("enabled_cloudwatch_logs_exports") + o := oraw.([]interface{}) + n := nraw.([]interface{}) + + create, disable := diffCloudWatchLogsExportConfiguration(o, n) - if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { - return 42, "destroyed", nil + return &docdb.CloudwatchLogsExportConfiguration{ + EnableLogTypes: flex.ExpandStringList(create), + DisableLogTypes: flex.ExpandStringList(disable), + } +} + +func diffCloudWatchLogsExportConfiguration(old, new []interface{}) ([]interface{}, []interface{}) { + add := make([]interface{}, 0) + disable := make([]interface{}, 0) + + for _, n := range new { + if _, contains := verify.SliceContainsString(old, n.(string)); !contains { + add = append(add, n) } + } - if err != nil { - return nil, "", err + for _, o := range old { + if _, contains := verify.SliceContainsString(new, o.(string)); !contains { + disable = append(disable, o) } + } + + return add, disable +} - var dbc *docdb.DBCluster +func FindDBClusterByID(ctx context.Context, conn *docdb.DocDB, id string) (*docdb.DBCluster, error) { + input := &docdb.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(id), + } + output, err := findDBCluster(ctx, conn, input) - for _, c := range resp.DBClusters { - if aws.StringValue(c.DBClusterIdentifier) == dbClusterIdentifier { - dbc = c - } + if err != nil { + return nil, err + } + + // Eventual consistency check. 
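+	// DescribeDBClusters can briefly return stale results after a create or delete,
+	// so a cluster whose identifier does not match the requested ID is treated as
+	// not found, allowing callers (waiters and retry helpers) to poll again.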
+ if aws.StringValue(output.DBClusterIdentifier) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, } + } + + return output, nil +} + +func findDBCluster(ctx context.Context, conn *docdb.DocDB, input *docdb.DescribeDBClustersInput) (*docdb.DBCluster, error) { + output, err := findDBClusters(ctx, conn, input) - if dbc == nil { - return 42, "destroyed", nil + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findDBClusters(ctx context.Context, conn *docdb.DocDB, input *docdb.DescribeDBClustersInput) ([]*docdb.DBCluster, error) { + var output []*docdb.DBCluster + + err := conn.DescribeDBClustersPagesWithContext(ctx, input, func(page *docdb.DescribeDBClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - if dbc.Status != nil { - log.Printf("[DEBUG] DB Cluster status (%s): %s", dbClusterIdentifier, *dbc.Status) + for _, v := range page.DBClusters { + if v != nil { + output = append(output, v) + } } - return dbc, aws.StringValue(dbc.Status), nil + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } } -} -var resourceClusterCreatePendingStates = []string{ - "creating", - "backing-up", - "modifying", - "preparing-data-migration", - "migrating", - "resetting-master-credentials", -} + if err != nil { + return nil, err + } -var resourceClusterDeletePendingStates = []string{ - "available", - "deleting", - "backing-up", - "modifying", + return output, nil } -var resourceClusterUpdatePendingStates = []string{ - "backing-up", - "modifying", - "resetting-master-credentials", - "upgrading", +func statusDBCluster(ctx context.Context, conn *docdb.DocDB, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindDBClusterByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } } -func waitForClusterUpdate(ctx context.Context, conn *docdb.DocDB, id string, timeout time.Duration) error { +func waitDBClusterCreated(ctx context.Context, conn *docdb.DocDB, id string, timeout time.Duration) (*docdb.DBCluster, error) { stateConf := &retry.StateChangeConf{ - Pending: resourceClusterUpdatePendingStates, + Pending: []string{ + "creating", + "backing-up", + "modifying", + "preparing-data-migration", + "migrating", + "resetting-master-credentials", + }, Target: []string{"available"}, - Refresh: resourceClusterStateRefreshFunc(ctx, conn, id), + Refresh: statusDBCluster(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting + Delay: 30 * time.Second, } - _, err := stateConf.WaitForStateContext(ctx) - return err + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*docdb.DBCluster); ok { + return output, err + } + + return nil, err } -func buildCloudWatchLogsExportConfiguration(d *schema.ResourceData) *docdb.CloudwatchLogsExportConfiguration { - oraw, nraw := d.GetChange("enabled_cloudwatch_logs_exports") - o := oraw.([]interface{}) - n := nraw.([]interface{}) +func waitDBClusterUpdated(ctx context.Context, conn *docdb.DocDB, id string, timeout time.Duration) (*docdb.DBCluster, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{ + "backing-up", + "modifying", + "resetting-master-credentials", + "upgrading", 
+ }, + Target: []string{"available"}, + Refresh: statusDBCluster(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } - create, disable := diffCloudWatchLogsExportConfiguration(o, n) + outputRaw, err := stateConf.WaitForStateContext(ctx) - return &docdb.CloudwatchLogsExportConfiguration{ - EnableLogTypes: flex.ExpandStringList(create), - DisableLogTypes: flex.ExpandStringList(disable), + if output, ok := outputRaw.(*docdb.DBCluster); ok { + return output, err } -} -func diffCloudWatchLogsExportConfiguration(old, new []interface{}) ([]interface{}, []interface{}) { - add := make([]interface{}, 0) - disable := make([]interface{}, 0) + return nil, err +} - for _, n := range new { - if _, contains := verify.SliceContainsString(old, n.(string)); !contains { - add = append(add, n) - } +func waitDBClusterDeleted(ctx context.Context, conn *docdb.DocDB, id string, timeout time.Duration) (*docdb.DBCluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + "available", + "deleting", + "backing-up", + "modifying", + }, + Target: []string{}, + Refresh: statusDBCluster(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, } - for _, o := range old { - if _, contains := verify.SliceContainsString(new, o.(string)); !contains { - disable = append(disable, o) - } + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*docdb.DBCluster); ok { + return output, err } - return add, disable + return nil, err } diff --git a/internal/service/docdb/cluster_instance.go b/internal/service/docdb/cluster_instance.go index c5dbc870e81..50c989ac1d9 100644 --- a/internal/service/docdb/cluster_instance.go +++ b/internal/service/docdb/cluster_instance.go @@ -97,8 +97,8 @@ func ResourceClusterInstance() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Default: "docdb", - ValidateFunc: validEngine(), + Default: engineDocDB, + ValidateFunc: validation.StringInSlice(engine_Values(), false), }, "engine_version": { Type: schema.TypeString, diff --git a/internal/service/docdb/cluster_test.go b/internal/service/docdb/cluster_test.go index 2306e95241a..37bbd0755b8 100644 --- a/internal/service/docdb/cluster_test.go +++ b/internal/service/docdb/cluster_test.go @@ -7,19 +7,19 @@ import ( "context" "errors" "fmt" - "log" "testing" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/docdb" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfdocdb "github.com/hashicorp/terraform-provider-aws/internal/service/docdb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func init() { @@ -56,10 +56,8 @@ func TestAccDocDBCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "engine", "docdb"), resource.TestCheckResourceAttrSet(resourceName, "engine_version"), resource.TestCheckResourceAttrSet(resourceName, "hosted_zone_id"), - resource.TestCheckResourceAttr(resourceName, - "enabled_cloudwatch_logs_exports.0", "audit"), - resource.TestCheckResourceAttr(resourceName, - "enabled_cloudwatch_logs_exports.1", 
"profiler"), + resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.0", "audit"), + resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.1", "profiler"), resource.TestCheckResourceAttr(resourceName, "deletion_protection", "false"), ), }, @@ -68,6 +66,7 @@ func TestAccDocDBCluster_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -102,6 +101,7 @@ func TestAccDocDBCluster_namePrefix(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -136,6 +136,7 @@ func TestAccDocDBCluster_generatedName(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -173,11 +174,12 @@ func TestAccDocDBCluster_GlobalClusterIdentifier(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", + "final_snapshot_identifier", "master_password", "skip_final_snapshot", - "snapshot_identifier", }, }, }, @@ -213,11 +215,12 @@ func TestAccDocDBCluster_GlobalClusterIdentifier_Add(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", + "final_snapshot_identifier", "master_password", "skip_final_snapshot", - "snapshot_identifier", }, }, { @@ -254,11 +257,12 @@ func TestAccDocDBCluster_GlobalClusterIdentifier_Remove(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", + "final_snapshot_identifier", "master_password", "skip_final_snapshot", - "snapshot_identifier", }, }, { @@ -299,11 +303,12 @@ func TestAccDocDBCluster_GlobalClusterIdentifier_Update(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", + "final_snapshot_identifier", "master_password", "skip_final_snapshot", - "snapshot_identifier", }, }, { @@ -361,7 +366,7 @@ func TestAccDocDBCluster_takeFinalSnapshot(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, docdb.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterSnapshot(ctx, snapshotName), + CheckDestroy: testAccCheckClusterDestroyWithFinalSnapshot(ctx), Steps: []resource.TestStep{ { Config: testAccClusterConfig_finalSnapshot(rName, snapshotName), @@ -374,6 +379,7 @@ func TestAccDocDBCluster_takeFinalSnapshot(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -429,6 +435,7 @@ func TestAccDocDBCluster_updateTags(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -470,6 +477,7 @@ func 
TestAccDocDBCluster_updateCloudWatchLogsExports(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -512,6 +520,7 @@ func TestAccDocDBCluster_kmsKey(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -546,6 +555,7 @@ func TestAccDocDBCluster_encrypted(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -585,6 +595,7 @@ func TestAccDocDBCluster_backupsUpdate(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -632,6 +643,7 @@ func TestAccDocDBCluster_port(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -674,6 +686,7 @@ func TestAccDocDBCluster_deleteProtection(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", "apply_immediately", "cluster_identifier_prefix", "final_snapshot_identifier", @@ -706,173 +719,101 @@ func TestAccDocDBCluster_deleteProtection(t *testing.T) { }) } -func testAccClusterConfig_globalIdentifierPrimarySecondary(rNameGlobal, rNamePrimary, rNameSecondary string) string { - return acctest.ConfigCompose( - acctest.ConfigMultipleRegionProvider(2), - fmt.Sprintf(` -data "aws_availability_zones" "alternate" { - provider = "awsalternate" - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_docdb_global_cluster" "test" { - global_cluster_identifier = "%[1]s" - engine = "docdb" - engine_version = "4.0.0" -} - -resource "aws_docdb_cluster" "primary" { - cluster_identifier = "%[2]s" - master_username = "foo" - master_password = "barbarbar" - skip_final_snapshot = true - global_cluster_identifier = aws_docdb_global_cluster.test.id - engine = aws_docdb_global_cluster.test.engine - engine_version = aws_docdb_global_cluster.test.engine_version -} - -resource "aws_docdb_cluster_instance" "primary" { - identifier = "%[2]s" - cluster_identifier = aws_docdb_cluster.primary.id - instance_class = "db.r5.large" -} - -resource "aws_vpc" "alternate" { - provider = "awsalternate" - cidr_block = "10.0.0.0/16" - - tags = { - Name = "%[3]s" - } -} - -resource "aws_subnet" "alternate" { - provider = "awsalternate" - count = 3 - vpc_id = aws_vpc.alternate.id - availability_zone = data.aws_availability_zones.alternate.names[count.index] - cidr_block = "10.0.${count.index}.0/24" - - tags = { - Name = "%[3]s" - } -} - -resource "aws_docdb_subnet_group" "alternate" { - provider = "awsalternate" - name = "%[3]s" - subnet_ids = aws_subnet.alternate[*].id -} - -resource "aws_docdb_cluster" "secondary" { - provider = "awsalternate" - cluster_identifier = "%[3]s" - skip_final_snapshot = true - db_subnet_group_name = aws_docdb_subnet_group.alternate.name - global_cluster_identifier = aws_docdb_global_cluster.test.id - engine = aws_docdb_global_cluster.test.engine - engine_version = 
aws_docdb_global_cluster.test.engine_version - depends_on = [aws_docdb_cluster_instance.primary] -} - -resource "aws_docdb_cluster_instance" "secondary" { - provider = "awsalternate" - identifier = "%[3]s" - cluster_identifier = aws_docdb_cluster.secondary.id - instance_class = "db.r5.large" -} -`, rNameGlobal, rNamePrimary, rNameSecondary)) -} - -func testAccClusterConfig_globalIdentifierUpdate(rName, globalClusterIdentifierResourceName string) string { - return fmt.Sprintf(` -resource "aws_docdb_global_cluster" "test" { - count = 2 - engine = "docdb" - engine_version = "4.0.0" # version compatible with global - global_cluster_identifier = "%[1]s-${count.index}" -} - -resource "aws_docdb_cluster" "test" { - cluster_identifier = %[1]q - global_cluster_identifier = %[2]s.id - engine_version = %[2]s.engine_version - master_password = "barbarbarbar" - master_username = "foo" - skip_final_snapshot = true -} -`, rName, globalClusterIdentifierResourceName) -} - -func testAccClusterConfig_globalCompatible(rName string) string { - return fmt.Sprintf(` -resource "aws_docdb_cluster" "test" { - cluster_identifier = %[1]q - engine_version = "4.0.0" # version compatible with global - master_password = "barbarbarbar" - master_username = "foo" - skip_final_snapshot = true -} -`, rName) -} - -func testAccClusterConfig_globalIdentifier(rName string) string { - return fmt.Sprintf(` -resource "aws_docdb_global_cluster" "test" { - engine_version = "4.0.0" # version compatible - engine = "docdb" - global_cluster_identifier = %[1]q -} +func TestAccDocDBCluster_updateEngineMajorVersion(t *testing.T) { + ctx := acctest.Context(t) + var dbCluster docdb.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_docdb_cluster.test" -resource "aws_docdb_cluster" "test" { - cluster_identifier = %[1]q - global_cluster_identifier = aws_docdb_global_cluster.test.id - engine_version = aws_docdb_global_cluster.test.engine_version - master_password = "barbarbarbar" - master_username = "foo" - skip_final_snapshot = true -} -`, rName) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, docdb.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_engineVersion(rName, "4.0.0"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "allow_major_version_upgrade", "true"), + resource.TestCheckResourceAttr(resourceName, "apply_immediately", "true"), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "rds", regexache.MustCompile(fmt.Sprintf("cluster:%s", rName))), + resource.TestCheckResourceAttr(resourceName, "availability_zones.#", "3"), + resource.TestCheckResourceAttr(resourceName, "backup_retention_period", "1"), + resource.TestCheckResourceAttr(resourceName, "cluster_identifier", rName), + resource.TestCheckNoResourceAttr(resourceName, "cluster_identifier_prefix"), + resource.TestCheckResourceAttr(resourceName, "cluster_members.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "cluster_resource_id"), + resource.TestCheckResourceAttr(resourceName, "db_cluster_parameter_group_name", "default.docdb4.0"), + resource.TestCheckResourceAttr(resourceName, "db_subnet_group_name", "default"), + resource.TestCheckResourceAttr(resourceName, "deletion_protection", "false"), 
+ resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "endpoint"), + resource.TestCheckResourceAttr(resourceName, "engine", "docdb"), + resource.TestCheckResourceAttr(resourceName, "engine_version", "4.0.0"), + resource.TestCheckNoResourceAttr(resourceName, "final_snapshot_identifier"), + resource.TestCheckResourceAttr(resourceName, "global_cluster_identifier", ""), + resource.TestCheckResourceAttrSet(resourceName, "hosted_zone_id"), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "master_password", "avoid-plaintext-passwords"), + resource.TestCheckResourceAttr(resourceName, "master_username", "tfacctest"), + resource.TestCheckResourceAttr(resourceName, "port", "27017"), + resource.TestCheckResourceAttrSet(resourceName, "preferred_backup_window"), + resource.TestCheckResourceAttrSet(resourceName, "preferred_maintenance_window"), + resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), + resource.TestCheckResourceAttr(resourceName, "skip_final_snapshot", "true"), + resource.TestCheckNoResourceAttr(resourceName, "snapshot_identifier"), + resource.TestCheckResourceAttr(resourceName, "storage_encrypted", "false"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "vpc_security_group_ids.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "allow_major_version_upgrade", + "apply_immediately", + "cluster_identifier_prefix", + "final_snapshot_identifier", + "master_password", + "skip_final_snapshot", + }, + }, + { + Config: testAccClusterConfig_engineVersion(rName, "5.0.0"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "cluster_members.#", "1"), + resource.TestCheckResourceAttr(resourceName, "db_cluster_parameter_group_name", "default.docdb5.0"), + resource.TestCheckResourceAttr(resourceName, "engine_version", "5.0.0"), + ), + }, + }, + }) } func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - return testAccCheckClusterDestroyWithProvider(ctx)(s, acctest.Provider) - } -} - -func testAccCheckClusterDestroyWithProvider(ctx context.Context) acctest.TestCheckWithProviderFunc { - return func(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).DocDBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DocDBConn(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_docdb_cluster" { continue } - // Try to find the Group - var err error - resp, err := conn.DescribeDBClustersWithContext(ctx, &docdb.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) + _, err := tfdocdb.FindDBClusterByID(ctx, conn, rs.Primary.ID) - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } + if tfresource.NotFound(err) { + continue } - if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { - continue + if err != nil { + return err } - return err + return fmt.Errorf("DocumentDB Cluster %s still exists", rs.Primary.ID) } return nil @@ -890,79 +831,59 @@ func testAccCheckClusterExistsProvider(ctx context.Context, n string, 
v *docdb.D return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") - } + conn := providerF().Meta().(*conns.AWSClient).DocDBConn(ctx) - provider := providerF() - conn := provider.Meta().(*conns.AWSClient).DocDBConn(ctx) - resp, err := conn.DescribeDBClustersWithContext(ctx, &docdb.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) + output, err := tfdocdb.FindDBClusterByID(ctx, conn, rs.Primary.ID) if err != nil { return err } - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == rs.Primary.ID { - *v = *c - return nil - } - } - - return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) - } -} - -func testAccCheckClusterRecreated(i, j *docdb.DBCluster) resource.TestCheckFunc { - return func(s *terraform.State) error { - if aws.TimeValue(i.ClusterCreateTime).Equal(aws.TimeValue(j.ClusterCreateTime)) { - return errors.New("DocumentDB Cluster was not recreated") - } + *v = *output return nil } } -func testAccCheckClusterSnapshot(ctx context.Context, snapshotName string) resource.TestCheckFunc { +func testAccCheckClusterDestroyWithFinalSnapshot(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).DocDBConn(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_docdb_cluster" { continue } - // Try and delete the snapshot before we check for the cluster not found - - awsClient := acctest.Provider.Meta().(*conns.AWSClient) - conn := awsClient.DocDBConn(ctx) - - log.Printf("[INFO] Deleting the Snapshot %s", snapshotName) - _, snapDeleteErr := conn.DeleteDBClusterSnapshotWithContext(ctx, &docdb.DeleteDBClusterSnapshotInput{ - DBClusterSnapshotIdentifier: aws.String(snapshotName), + finalSnapshotID := rs.Primary.Attributes["final_snapshot_identifier"] + _, err := conn.DeleteDBClusterSnapshotWithContext(ctx, &docdb.DeleteDBClusterSnapshotInput{ + DBClusterSnapshotIdentifier: aws.String(finalSnapshotID), }) - if snapDeleteErr != nil { - return snapDeleteErr + + if err != nil { + return err } - // Try to find the Group - var err error - resp, err := conn.DescribeDBClustersWithContext(ctx, &docdb.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) + _, err = tfdocdb.FindDBClusterByID(ctx, conn, rs.Primary.ID) - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } + if tfresource.NotFound(err) { + continue } - if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) { - continue + if err != nil { + return err } - return err + return fmt.Errorf("DocumentDB Cluster %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckClusterRecreated(i, j *docdb.DBCluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.TimeValue(i.ClusterCreateTime).Equal(aws.TimeValue(j.ClusterCreateTime)) { + return errors.New("DocumentDB Cluster was not recreated") } return nil @@ -1233,3 +1154,162 @@ resource "aws_docdb_cluster" "default" { } `, isProtected) } + +func testAccClusterConfig_globalIdentifierPrimarySecondary(rNameGlobal, rNamePrimary, rNameSecondary string) string { + return acctest.ConfigCompose( + acctest.ConfigMultipleRegionProvider(2), + fmt.Sprintf(` +data "aws_availability_zones" "alternate" { + provider = "awsalternate" + state = "available" + + filter { + name = "opt-in-status" + values = 
["opt-in-not-required"] + } +} + +resource "aws_docdb_global_cluster" "test" { + global_cluster_identifier = "%[1]s" + engine = "docdb" + engine_version = "4.0.0" +} + +resource "aws_docdb_cluster" "primary" { + cluster_identifier = "%[2]s" + master_username = "foo" + master_password = "barbarbar" + skip_final_snapshot = true + global_cluster_identifier = aws_docdb_global_cluster.test.id + engine = aws_docdb_global_cluster.test.engine + engine_version = aws_docdb_global_cluster.test.engine_version +} + +resource "aws_docdb_cluster_instance" "primary" { + identifier = "%[2]s" + cluster_identifier = aws_docdb_cluster.primary.id + instance_class = "db.r5.large" +} + +resource "aws_vpc" "alternate" { + provider = "awsalternate" + cidr_block = "10.0.0.0/16" + + tags = { + Name = "%[3]s" + } +} + +resource "aws_subnet" "alternate" { + provider = "awsalternate" + count = 3 + vpc_id = aws_vpc.alternate.id + availability_zone = data.aws_availability_zones.alternate.names[count.index] + cidr_block = "10.0.${count.index}.0/24" + + tags = { + Name = "%[3]s" + } +} + +resource "aws_docdb_subnet_group" "alternate" { + provider = "awsalternate" + name = "%[3]s" + subnet_ids = aws_subnet.alternate[*].id +} + +resource "aws_docdb_cluster" "secondary" { + provider = "awsalternate" + cluster_identifier = "%[3]s" + skip_final_snapshot = true + db_subnet_group_name = aws_docdb_subnet_group.alternate.name + global_cluster_identifier = aws_docdb_global_cluster.test.id + engine = aws_docdb_global_cluster.test.engine + engine_version = aws_docdb_global_cluster.test.engine_version + depends_on = [aws_docdb_cluster_instance.primary] +} + +resource "aws_docdb_cluster_instance" "secondary" { + provider = "awsalternate" + identifier = "%[3]s" + cluster_identifier = aws_docdb_cluster.secondary.id + instance_class = "db.r5.large" +} +`, rNameGlobal, rNamePrimary, rNameSecondary)) +} + +func testAccClusterConfig_globalIdentifierUpdate(rName, globalClusterIdentifierResourceName string) string { + return fmt.Sprintf(` +resource "aws_docdb_global_cluster" "test" { + count = 2 + engine = "docdb" + engine_version = "4.0.0" # version compatible with global + global_cluster_identifier = "%[1]s-${count.index}" +} + +resource "aws_docdb_cluster" "test" { + cluster_identifier = %[1]q + global_cluster_identifier = %[2]s.id + engine_version = %[2]s.engine_version + master_password = "barbarbarbar" + master_username = "foo" + skip_final_snapshot = true +} +`, rName, globalClusterIdentifierResourceName) +} + +func testAccClusterConfig_globalCompatible(rName string) string { + return fmt.Sprintf(` +resource "aws_docdb_cluster" "test" { + cluster_identifier = %[1]q + engine_version = "4.0.0" # version compatible with global + master_password = "barbarbarbar" + master_username = "foo" + skip_final_snapshot = true +} +`, rName) +} + +func testAccClusterConfig_globalIdentifier(rName string) string { + return fmt.Sprintf(` +resource "aws_docdb_global_cluster" "test" { + engine_version = "4.0.0" # version compatible + engine = "docdb" + global_cluster_identifier = %[1]q +} + +resource "aws_docdb_cluster" "test" { + cluster_identifier = %[1]q + global_cluster_identifier = aws_docdb_global_cluster.test.id + engine_version = aws_docdb_global_cluster.test.engine_version + master_password = "barbarbarbar" + master_username = "foo" + skip_final_snapshot = true +} +`, rName) +} + +func testAccClusterConfig_engineVersion(rName, engineVersion string) string { + return fmt.Sprintf(` +resource "aws_docdb_cluster" "test" { + cluster_identifier = %[1]q + 
engine_version = %[2]q + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" + skip_final_snapshot = true + apply_immediately = true + allow_major_version_upgrade = true +} + +data "aws_docdb_orderable_db_instance" "test" { + engine = aws_docdb_cluster.test.engine + preferred_instance_classes = ["db.t3.medium", "db.t4g.medium", "db.r5.large", "db.r6g.large"] +} + +resource "aws_docdb_cluster_instance" "test" { + identifier = %[1]q + cluster_identifier = aws_docdb_cluster.test.id + instance_class = data.aws_docdb_orderable_db_instance.test.instance_class +} +`, rName, engineVersion) +} diff --git a/internal/service/docdb/consts.go b/internal/service/docdb/consts.go index 0cf8edbbf85..3b4099b476e 100644 --- a/internal/service/docdb/consts.go +++ b/internal/service/docdb/consts.go @@ -10,3 +10,17 @@ import ( const ( propagationTimeout = 2 * time.Minute ) + +const ( + engineDocDB = "docdb" // nosemgrep:ci.docdb-in-const-name,ci.docdb-in-var-name +) + +func engine_Values() []string { + return []string{ + engineDocDB, + } +} + +const ( + errCodeInvalidParameterValue = "InvalidParameterValue" +) diff --git a/internal/service/docdb/find.go b/internal/service/docdb/find.go index eb66e779ade..865455de871 100644 --- a/internal/service/docdb/find.go +++ b/internal/service/docdb/find.go @@ -62,35 +62,6 @@ func findGlobalClusterIDByARN(ctx context.Context, conn *docdb.DocDB, arn string return "" } -func FindDBClusterById(ctx context.Context, conn *docdb.DocDB, dBClusterID string) (*docdb.DBCluster, error) { - var dBCluster *docdb.DBCluster - - input := &docdb.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(dBClusterID), - } - - err := conn.DescribeDBClustersPagesWithContext(ctx, input, func(page *docdb.DescribeDBClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, dbc := range page.DBClusters { - if dbc == nil { - continue - } - - if aws.StringValue(dbc.DBClusterIdentifier) == dBClusterID { - dBCluster = dbc - return false - } - } - - return !lastPage - }) - - return dBCluster, err -} - func FindDBClusterSnapshotById(ctx context.Context, conn *docdb.DocDB, dBClusterSnapshotID string) (*docdb.DBClusterSnapshot, error) { var dBClusterSnapshot *docdb.DBClusterSnapshot diff --git a/internal/service/docdb/global_cluster.go b/internal/service/docdb/global_cluster.go index 7bd43218feb..d64be95ff9a 100644 --- a/internal/service/docdb/global_cluster.go +++ b/internal/service/docdb/global_cluster.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -60,7 +61,7 @@ func ResourceGlobalCluster() *schema.Resource { ForceNew: true, AtLeastOneOf: []string{"engine", "source_db_cluster_identifier"}, ConflictsWith: []string{"source_db_cluster_identifier"}, - ValidateFunc: validEngine(), + ValidateFunc: validation.StringInSlice(engine_Values(), false), }, "engine_version": { Type: schema.TypeString, @@ -339,8 +340,7 @@ func resourceGlobalClusterUpgradeEngineVersion(ctx context.Context, d *schema.Re return err } for _, clusterMember := range globalCluster.GlobalClusterMembers { - err := waitForClusterUpdate(ctx, conn, findGlobalClusterIDByARN(ctx, conn, aws.StringValue(clusterMember.DBClusterArn)),
d.Timeout(schema.TimeoutUpdate)) - if err != nil { + if _, err := waitDBClusterUpdated(ctx, conn, findGlobalClusterIDByARN(ctx, conn, aws.StringValue(clusterMember.DBClusterArn)), d.Timeout(schema.TimeoutUpdate)); err != nil { return err } } diff --git a/internal/service/docdb/status.go b/internal/service/docdb/status.go index 90e8e229151..ef589d35fe9 100644 --- a/internal/service/docdb/status.go +++ b/internal/service/docdb/status.go @@ -30,22 +30,6 @@ func statusGlobalClusterRefreshFunc(ctx context.Context, conn *docdb.DocDB, glob } } -func statusDBClusterRefreshFunc(ctx context.Context, conn *docdb.DocDB, dBClusterID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - dBCluster, err := FindDBClusterById(ctx, conn, dBClusterID) - - if tfawserr.ErrCodeEquals(err, docdb.ErrCodeDBClusterNotFoundFault) || dBCluster == nil { - return nil, DBClusterStatusDeleted, nil - } - - if err != nil { - return nil, "", fmt.Errorf("reading DocumentDB Cluster (%s): %w", dBClusterID, err) - } - - return dBCluster, aws.StringValue(dBCluster.Status), nil - } -} - func statusDBClusterSnapshotRefreshFunc(ctx context.Context, conn *docdb.DocDB, dBClusterSnapshotID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { dBClusterSnapshot, err := FindDBClusterSnapshotById(ctx, conn, dBClusterSnapshotID) diff --git a/internal/service/docdb/sweep.go b/internal/service/docdb/sweep.go index aee3affa6a2..91f9d1ac633 100644 --- a/internal/service/docdb/sweep.go +++ b/internal/service/docdb/sweep.go @@ -74,35 +74,30 @@ func init() { func sweepDBClusters(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("error getting client: %s", err) } - conn := client.DocDBConn(ctx) input := &docdb.DescribeDBClustersInput{} + sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeDBClustersPagesWithContext(ctx, input, func(out *docdb.DescribeDBClustersOutput, lastPage bool) bool { - for _, dBCluster := range out.DBClusters { - id := aws.StringValue(dBCluster.DBClusterIdentifier) - input := &docdb.DeleteDBClusterInput{ - DBClusterIdentifier: dBCluster.DBClusterIdentifier, - SkipFinalSnapshot: aws.Bool(true), - } - - log.Printf("[INFO] Deleting DocumentDB Cluster: %s", id) - - _, err := conn.DeleteDBClusterWithContext(ctx, input) + err = conn.DescribeDBClustersPagesWithContext(ctx, input, func(page *docdb.DescribeDBClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } - if err != nil { - log.Printf("[ERROR] Failed to delete DocumentDB Cluster (%s): %s", id, err) - continue + for _, v := range page.DBClusters { + r := ResourceCluster() + d := r.Data(nil) + d.SetId(aws.StringValue(v.DBClusterIdentifier)) + d.Set("skip_final_snapshot", true) + if globalCluster, err := findGlobalClusterByARN(ctx, conn, aws.StringValue(v.DBClusterArn)); err == nil && globalCluster != nil { + d.Set("global_cluster_identifier", globalCluster.GlobalClusterIdentifier) } - if err := WaitForDBClusterDeletion(ctx, conn, id, DBClusterDeleteTimeout); err != nil { - log.Printf("[ERROR] Failure while waiting for DocumentDB Cluster (%s) to be deleted: %s", id, err) - } + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + return !lastPage }) @@ -112,7 +107,13 @@ func sweepDBClusters(region string) error { } if err != nil { - return fmt.Errorf("retrieving DocumentDB Clusters: %w", err) + return
fmt.Errorf("error listing DocumentDB Clusters (%s): %w", region, err) + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping DocumentDB Clusters (%s): %w", region, err) } return nil diff --git a/internal/service/docdb/validate.go b/internal/service/docdb/validate.go index 76b8e5f45ee..37251767183 100644 --- a/internal/service/docdb/validate.go +++ b/internal/service/docdb/validate.go @@ -8,8 +8,6 @@ import ( "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func validClusterIdentifier(v interface{}, k string) (ws []string, errors []error) { @@ -54,12 +52,6 @@ func validClusterSnapshotIdentifier(v interface{}, k string) (ws []string, error return } -func validEngine() schema.SchemaValidateFunc { - return validation.StringInSlice([]string{ - "docdb", - }, false) -} - func validIdentifier(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !regexache.MustCompile(`^[0-9a-z-]+$`).MatchString(value) { diff --git a/internal/service/docdb/wait.go b/internal/service/docdb/wait.go index 205321d9ae2..ba19652c502 100644 --- a/internal/service/docdb/wait.go +++ b/internal/service/docdb/wait.go @@ -16,7 +16,6 @@ import ( const ( DBClusterSnapshotDeleteTimeout = 5 * time.Minute - DBClusterDeleteTimeout = 5 * time.Minute DBInstanceDeleteTimeout = 5 * time.Minute DBSubnetGroupDeleteTimeout = 5 * time.Minute EventSubscriptionDeleteTimeout = 5 * time.Minute @@ -26,9 +25,6 @@ const ( ) const ( - DBClusterStatusAvailable = "available" - DBClusterStatusDeleted = "deleted" - DBClusterStatusDeleting = "deleting" DBInstanceStatusAvailable = "available" DBInstanceStatusDeleted = "deleted" DBInstanceStatusDeleting = "deleting" @@ -110,25 +106,6 @@ func waitForGlobalClusterRemoval(ctx context.Context, conn *docdb.DocDB, dbClust return nil } -func WaitForDBClusterDeletion(ctx context.Context, conn *docdb.DocDB, dBClusterID string, timeout time.Duration) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{DBClusterStatusAvailable, DBClusterStatusDeleting}, - Target: []string{DBClusterStatusDeleted}, - Refresh: statusDBClusterRefreshFunc(ctx, conn, dBClusterID), - Timeout: timeout, - NotFoundChecks: 1, - } - - log.Printf("[DEBUG] Waiting for DocumentDB Cluster (%s) deletion", dBClusterID) - _, err := stateConf.WaitForStateContext(ctx) - - if tfresource.NotFound(err) { - return nil - } - - return err -} - func WaitForDBClusterSnapshotDeletion(ctx context.Context, conn *docdb.DocDB, dBClusterSnapshotID string, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ Pending: []string{DBClusterSnapshotStatusAvailable, DBClusterSnapshotStatusDeleting}, diff --git a/internal/service/neptune/cluster.go b/internal/service/neptune/cluster.go index 155fd32ce7c..e2088118755 100644 --- a/internal/service/neptune/cluster.go +++ b/internal/service/neptune/cluster.go @@ -563,7 +563,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).NeptuneConn(ctx) - if d.HasChangesExcept("tags", "tags_all", "iam_roles", "global_cluster_identifier") { + if d.HasChangesExcept("tags", "tags_all", "global_cluster_identifier", "iam_roles", "skip_final_snapshot") { allowMajorVersionUpgrade := d.Get("allow_major_version_upgrade").(bool) input := &neptune.ModifyDBClusterInput{ 
AllowMajorVersionUpgrade: aws.Bool(allowMajorVersionUpgrade), diff --git a/website/docs/r/docdb_cluster.html.markdown b/website/docs/r/docdb_cluster.html.markdown index c5de4947ca0..16d80874e73 100644 --- a/website/docs/r/docdb_cluster.html.markdown +++ b/website/docs/r/docdb_cluster.html.markdown @@ -42,6 +42,7 @@ the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/referenc This resource supports the following arguments: +* `allow_major_version_upgrade` - (Optional) Whether major version upgrades are allowed. You must allow major version upgrades when specifying an `engine_version` that is a different major version than the DB cluster's current version. * `apply_immediately` - (Optional) Specifies whether any cluster modifications are applied immediately or during the next maintenance window. Default is `false`.
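As a minimal sketch of how the two arguments above combine, the configuration below (modeled on the `testAccClusterConfig_engineVersion` acceptance test config in this change) opts a cluster into in-place major version upgrades; the identifier, credentials, and versions are illustrative placeholders only.

```terraform
# Illustrative example only: with allow_major_version_upgrade and
# apply_immediately set, a later change of engine_version across a major
# version (e.g. "4.0.0" -> "5.0.0") can be applied as an in-place upgrade.
resource "aws_docdb_cluster" "example" {
  cluster_identifier          = "example"     # placeholder
  engine_version              = "4.0.0"       # later raised to "5.0.0"
  master_username             = "exampleuser" # placeholder
  master_password             = "avoid-plaintext-passwords"
  allow_major_version_upgrade = true
  apply_immediately           = true
  skip_final_snapshot         = true
}
```

Without `allow_major_version_upgrade = true`, a cross-major `engine_version` change is expected to be rejected, which is the constraint the argument description above refers to.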