Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Supports config_server_type and config_server_management_mode in advanced_cluster #2670

Merged
merged 9 commits into from
Oct 14, 2024
11 changes: 11 additions & 0 deletions .changelog/2670.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
```release-note:enhancement
resource/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: I think it's ok, although on other occasions we've added a line per field

```

```release-note:enhancement
data-source/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields
```

```release-note:enhancement
data-source/mongodbatlas_advanced_clusters: Adds new `config_server_management_mode` and `config_server_type` fields
```
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,14 @@ func DataSource() *schema.Resource {
Type: schema.TypeBool,
Computed: true,
},
"config_server_management_mode": {
Type: schema.TypeString,
Computed: true,
},
"config_server_type": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
Expand Down Expand Up @@ -312,7 +320,10 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
}

diags := setRootFields(d, convertClusterDescToLatestExcludeRepSpecs(clusterDescOld), false)
clusterDesc := convertClusterDescToLatestExcludeRepSpecs(clusterDescOld)
clusterDesc.ConfigServerManagementMode = clusterDescNew.ConfigServerManagementMode
clusterDesc.ConfigServerType = clusterDescNew.ConfigServerType
diags := setRootFields(d, clusterDesc, false)
if diags.HasError() {
return diags
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,14 @@ func PluralDataSource() *schema.Resource {
Type: schema.TypeBool,
Computed: true,
},
"config_server_management_mode": {
Type: schema.TypeString,
Computed: true,
},
"config_server_type": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
Expand Down Expand Up @@ -367,6 +375,8 @@ func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530.
"global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(),
"replica_set_scaling_strategy": cluster.GetReplicaSetScalingStrategy(),
"redact_client_log_data": cluster.GetRedactClientLogData(),
"config_server_management_mode": cluster.GetConfigServerManagementMode(),
"config_server_type": cluster.GetConfigServerType(),
}
results = append(results, result)
}
Expand Down Expand Up @@ -424,6 +434,8 @@ func flattenAdvancedClustersOldSDK(ctx context.Context, connV20240530 *admin2024
"global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(),
"replica_set_scaling_strategy": clusterDescNew.GetReplicaSetScalingStrategy(),
"redact_client_log_data": clusterDescNew.GetRedactClientLogData(),
"config_server_management_mode": clusterDescNew.GetConfigServerManagementMode(),
"config_server_type": clusterDescNew.GetConfigServerType(),
}
results = append(results, result)
}
Expand Down
43 changes: 37 additions & 6 deletions internal/service/advancedcluster/resource_advanced_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -346,6 +346,15 @@ func Resource() *schema.Resource {
Optional: true,
Computed: true,
},
"config_server_management_mode": {
EspenAlbert marked this conversation as resolved.
Show resolved Hide resolved
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"config_server_type": {
Type: schema.TypeString,
Computed: true,
},
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(3 * time.Hour),
Expand Down Expand Up @@ -458,6 +467,9 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
if v, ok := d.GetOk("redact_client_log_data"); ok {
params.RedactClientLogData = conversion.Pointer(v.(bool))
}
if v, ok := d.GetOk("config_server_management_mode"); ok {
params.ConfigServerManagementMode = conversion.StringPtr(v.(string))
}

// Validate oplog_size_mb to show the error before the cluster is created.
if oplogSizeMB, ok := d.GetOkExists("advanced_configuration.0.oplog_size_mb"); ok {
Expand Down Expand Up @@ -564,7 +576,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err))
}

zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(cluster)
if err != nil {
return diag.FromErr(err)
Expand All @@ -576,6 +587,8 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
}

clusterResp = convertClusterDescToLatestExcludeRepSpecs(clusterOldSDK)
clusterResp.ConfigServerManagementMode = cluster.ConfigServerManagementMode
clusterResp.ConfigServerType = cluster.ConfigServerType
} else {
cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute()
if err != nil {
Expand Down Expand Up @@ -748,7 +761,16 @@ func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription2024
if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err))
}

if cluster.ConfigServerType != nil {
lantoli marked this conversation as resolved.
Show resolved Hide resolved
if err := d.Set("config_server_type", *cluster.ConfigServerType); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_type", clusterName, err))
}
}
if cluster.ConfigServerManagementMode != nil {
if err := d.Set("config_server_management_mode", *cluster.ConfigServerManagementMode); err != nil {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can we simplify this by using SDK getter functions? Same for above

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You were right. Changed in c38307c
I thought we didn't want to update state when the user hasn't set those fields before

return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_management_mode", clusterName, err))
}
}
return nil
}

Expand Down Expand Up @@ -815,27 +837,33 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
return diags
}
clusterChangeDetect := new(admin20240530.AdvancedClusterDescription)
var waitOnUpdate bool
if !reflect.DeepEqual(req, clusterChangeDetect) {
if err := CheckRegionConfigsPriorityOrderOld(req.GetReplicationSpecs()); err != nil {
return diag.FromErr(err)
}
if _, _, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil {
return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
}
if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil {
return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
}
} else if d.HasChange("replica_set_scaling_strategy") || d.HasChange("redact_client_log_data") {
waitOnUpdate = true
}
if d.HasChange("replica_set_scaling_strategy") || d.HasChange("redact_client_log_data") || d.HasChange("config_server_management_mode") {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fix: supports updating both old cluster attributes and new attributes when using old SDK

request := new(admin.ClusterDescription20240805)
if d.HasChange("replica_set_scaling_strategy") {
request.ReplicaSetScalingStrategy = conversion.Pointer(d.Get("replica_set_scaling_strategy").(string))
}
if d.HasChange("redact_client_log_data") {
request.RedactClientLogData = conversion.Pointer(d.Get("redact_client_log_data").(bool))
}
if d.HasChange("config_server_management_mode") {
request.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string))
}
if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil {
return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
}
waitOnUpdate = true
}
if waitOnUpdate {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

use waitForUpdateToFinish once at the end instead of calling it twice, once inside each if

if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil {
return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
}
Expand Down Expand Up @@ -993,6 +1021,9 @@ func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clust
if d.HasChange("redact_client_log_data") {
cluster.RedactClientLogData = conversion.Pointer(d.Get("redact_client_log_data").(bool))
}
if d.HasChange("config_server_management_mode") {
cluster.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string))
}

return cluster, nil
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,11 @@ const (
dataSourcePluralName = "data.mongodbatlas_advanced_clusters.test"
)

var (
configServerManagementModeFixedToDedicated = "FIXED_TO_DEDICATED"
configServerManagementModeAtlasManaged = "ATLAS_MANAGED"
)

func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) {
var (
projectID = acc.ProjectIDExecution(t)
Expand Down Expand Up @@ -142,12 +147,12 @@ func singleShardedMultiCloudTestCase(t *testing.T) resource.TestCase {
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10"),
Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true),
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10", nil),
Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true, nil),
},
{
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10"),
Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true),
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10", nil),
Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true, nil),
},
{
ResourceName: resourceName,
Expand Down Expand Up @@ -555,12 +560,12 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchema(t *testing.T)
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10"),
Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false),
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10", &configServerManagementModeFixedToDedicated),
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. Choosing to only test for the old schema as there is extra logic there.
  2. Choosing to re-use existing test as we need a sharded cluster (also picked a test without migration tests since we are introducing a new field)

Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false, &configServerManagementModeFixedToDedicated),
},
{
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20"),
Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false),
Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20", &configServerManagementModeAtlasManaged),
Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false, &configServerManagementModeAtlasManaged),
},
},
})
Expand Down Expand Up @@ -1181,7 +1186,17 @@ func checkReplicaSetMultiCloud(name string, regionConfigs int) resource.TestChec
)
}

func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string) string {
func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string, configServerManagementMode *string) string {
var rootConfig string
if configServerManagementMode != nil {
// valid values: FIXED_TO_DEDICATED or ATLAS_MANAGED (default)
lantoli marked this conversation as resolved.
Show resolved Hide resolved
// only valid for Major version 8 and later
// cluster must be SHARDED
rootConfig = fmt.Sprintf(`
mongo_db_major_version = "8"
config_server_management_mode = %[1]q
Comment on lines +1196 to +1197
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should we clarify in docs that this attribute requires mongodb version > 8 and cluster must be sharded?

Copy link
Collaborator Author

@EspenAlbert EspenAlbert Oct 11, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can find the doc updates here: #2677 it mentions sharded.
About version > 8, that is only for embedded, so the field is still valid on earlier versions but it will always be direct.

`, *configServerManagementMode)
}
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
org_id = %[1]q
Expand All @@ -1192,6 +1207,7 @@ func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards
project_id = mongodbatlas_project.cluster_project.id
name = %[3]q
cluster_type = "SHARDED"
%[6]s

replication_specs {
num_shards = %[4]d
Expand Down Expand Up @@ -1224,10 +1240,13 @@ func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards
project_id = mongodbatlas_advanced_cluster.test.project_id
name = mongodbatlas_advanced_cluster.test.name
}
`, orgID, projectName, name, numShards, analyticsSize)
data "mongodbatlas_advanced_clusters" "test" {
project_id = mongodbatlas_advanced_cluster.test.project_id
}
`, orgID, projectName, name, numShards, analyticsSize, rootConfig)
}

func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool) resource.TestCheckFunc {
func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool, configServerManagementMode *string) resource.TestCheckFunc {
additionalChecks := []resource.TestCheckFunc{
resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)),
Expand All @@ -1242,6 +1261,17 @@ func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize s
additionalChecks,
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id"))
}
if configServerManagementMode != nil {
additionalChecks = append(
additionalChecks,
resource.TestCheckResourceAttr(resourceName, "config_server_management_mode", *configServerManagementMode),
resource.TestCheckResourceAttrSet(resourceName, "config_server_type"),
resource.TestCheckResourceAttr(dataSourceName, "config_server_management_mode", *configServerManagementMode),
resource.TestCheckResourceAttrSet(dataSourceName, "config_server_type"),
resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.config_server_management_mode", *configServerManagementMode),
resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.config_server_type"),
)
}

return checkAggr(
[]string{"project_id", "replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"},
Expand Down