diff --git a/.changelog/2670.txt b/.changelog/2670.txt new file mode 100644 index 0000000000..981dbefa4d --- /dev/null +++ b/.changelog/2670.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_clusters: Adds new `config_server_management_mode` and `config_server_type` fields +``` diff --git a/docs/data-sources/advanced_cluster.md b/docs/data-sources/advanced_cluster.md index 08596ef3de..bd62827de3 100644 --- a/docs/data-sources/advanced_cluster.md +++ b/docs/data-sources/advanced_cluster.md @@ -105,6 +105,8 @@ In addition to all arguments above, the following attributes are exported: * `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true). * `replica_set_scaling_strategy` - (Optional) Replica set scaling mode for your cluster. * `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. +* `config_server_management_mode` - Config Server Management Mode for creating or updating a sharded cluster. Valid values are `ATLAS_MANAGED` (default) and `FIXED_TO_DEDICATED`. When configured as `ATLAS_MANAGED`, Atlas may automatically switch the cluster's config server type for optimal performance and savings. When configured as `FIXED_TO_DEDICATED`, the cluster will always use a dedicated config server. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). +* `config_server_type` - Describes a sharded cluster's config server type. Valid values are `DEDICATED` and `EMBEDDED`. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). ### bi_connector_config diff --git a/docs/data-sources/advanced_clusters.md b/docs/data-sources/advanced_clusters.md index bedbe75d1c..98f85f42bd 100644 --- a/docs/data-sources/advanced_clusters.md +++ b/docs/data-sources/advanced_clusters.md @@ -107,6 +107,8 @@ In addition to all arguments above, the following attributes are exported: * `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true). * `replica_set_scaling_strategy` - (Optional) Replica set scaling mode for your cluster. * `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. +* `config_server_management_mode` - Config Server Management Mode for creating or updating a sharded cluster. Valid values are `ATLAS_MANAGED` (default) and `FIXED_TO_DEDICATED`. When configured as `ATLAS_MANAGED`, Atlas may automatically switch the cluster's config server type for optimal performance and savings. When configured as `FIXED_TO_DEDICATED`, the cluster will always use a dedicated config server. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). 
+* `config_server_type` - Describes a sharded cluster's config server type. Valid values are `DEDICATED` and `EMBEDDED`. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). ### bi_connector_config diff --git a/docs/resources/advanced_cluster.md b/docs/resources/advanced_cluster.md index 80b5606e2c..27c39b3785 100644 --- a/docs/resources/advanced_cluster.md +++ b/docs/resources/advanced_cluster.md @@ -400,6 +400,7 @@ This parameter defaults to false. * `global_cluster_self_managed_sharding` - (Optional) Flag that indicates if cluster uses Atlas-Managed Sharding (false, default) or Self-Managed Sharding (true). It can only be enabled for Global Clusters (`GEOSHARDED`). It cannot be changed once the cluster is created. Use this mode if you're an advanced user and the default configuration is too restrictive for your workload. If you select this option, you must manually configure the sharding strategy, more info [here](https://www.mongodb.com/docs/atlas/tutorial/create-global-cluster/#select-your-sharding-configuration). * `replica_set_scaling_strategy` - (Optional) Replica set scaling mode for your cluster. Valid values are `WORKLOAD_TYPE`, `SEQUENTIAL` and `NODE_TYPE`. By default, Atlas scales under `WORKLOAD_TYPE`. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes. When configured as `SEQUENTIAL`, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads. When configured as `NODE_TYPE`, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads. [Modify the Replica Set Scaling Mode](https://dochub.mongodb.org/core/scale-nodes) * `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. Use this in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements. **Note**: Changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated. +* `config_server_management_mode` - (Optional) Config Server Management Mode for creating or updating a sharded cluster. Valid values are `ATLAS_MANAGED` (default) and `FIXED_TO_DEDICATED`. When configured as `ATLAS_MANAGED`, Atlas may automatically switch the cluster's config server type for optimal performance and savings. When configured as `FIXED_TO_DEDICATED`, the cluster will always use a dedicated config server. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). ### bi_connector_config @@ -682,6 +683,8 @@ In addition to all arguments above, the following attributes are exported: - DELETED - REPAIRING * `replication_specs.#.container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the id of the container created when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`. 
Example `AWS:US_EAST_1" = "61e0797dde08fb498ca11a71`. +* `config_server_type` - Describes a sharded cluster's config server type. Valid values are `DEDICATED` and `EMBEDDED`. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). + ## Import diff --git a/internal/service/advancedcluster/data_source_advanced_cluster.go b/internal/service/advancedcluster/data_source_advanced_cluster.go index 7452f8e62d..40e58fee9d 100644 --- a/internal/service/advancedcluster/data_source_advanced_cluster.go +++ b/internal/service/advancedcluster/data_source_advanced_cluster.go @@ -252,6 +252,14 @@ func DataSource() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "config_server_management_mode": { + Type: schema.TypeString, + Computed: true, + }, + "config_server_type": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -310,7 +318,10 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } - diags := setRootFields(d, convertClusterDescToLatestExcludeRepSpecs(clusterDescOld), false) + clusterDesc := convertClusterDescToLatestExcludeRepSpecs(clusterDescOld) + clusterDesc.ConfigServerManagementMode = clusterDescNew.ConfigServerManagementMode + clusterDesc.ConfigServerType = clusterDescNew.ConfigServerType + diags := setRootFields(d, clusterDesc, false) if diags.HasError() { return diags } diff --git a/internal/service/advancedcluster/data_source_advanced_clusters.go b/internal/service/advancedcluster/data_source_advanced_clusters.go index 65a654e0b7..d9c52e70be 100644 --- a/internal/service/advancedcluster/data_source_advanced_clusters.go +++ b/internal/service/advancedcluster/data_source_advanced_clusters.go @@ -265,6 +265,14 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeBool, Computed: true, }, + "config_server_management_mode": { + Type: schema.TypeString, + Computed: true, + }, + "config_server_type": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -365,6 +373,8 @@ func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530.
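The documentation entries above cover both new fields. As a minimal sketch of how they fit together (the project reference, cluster name, region, and instance size below are illustrative placeholders, not taken from this change), `config_server_management_mode` is the only settable field, while `config_server_type` can only be read back:

```terraform
# Hypothetical usage of the new fields; per the note in the test helper below,
# the management mode applies to SHARDED clusters on MongoDB major version 8 or later.
resource "mongodbatlas_advanced_cluster" "example" {
  project_id                    = var.project_id       # placeholder
  name                          = "sharded-example"    # placeholder
  cluster_type                  = "SHARDED"
  mongo_db_major_version        = "8"
  config_server_management_mode = "FIXED_TO_DEDICATED" # or ATLAS_MANAGED (default)

  replication_specs {
    num_shards = 2
    region_configs {
      provider_name = "AWS"
      priority      = 7
      region_name   = "US_EAST_1"
      electable_specs {
        instance_size = "M10"
        node_count    = 3
      }
    }
  }
}

# config_server_type is Computed and read-only once the cluster exists.
output "config_server_type" {
  value = mongodbatlas_advanced_cluster.example.config_server_type
}
```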
"global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(), "replica_set_scaling_strategy": cluster.GetReplicaSetScalingStrategy(), "redact_client_log_data": cluster.GetRedactClientLogData(), + "config_server_management_mode": cluster.GetConfigServerManagementMode(), + "config_server_type": cluster.GetConfigServerType(), } results = append(results, result) } @@ -422,6 +432,8 @@ func flattenAdvancedClustersOldSDK(ctx context.Context, connV20240530 *admin2024 "global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(), "replica_set_scaling_strategy": clusterDescNew.GetReplicaSetScalingStrategy(), "redact_client_log_data": clusterDescNew.GetRedactClientLogData(), + "config_server_management_mode": clusterDescNew.GetConfigServerManagementMode(), + "config_server_type": clusterDescNew.GetConfigServerType(), } results = append(results, result) } diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 3c2edab6b2..4cbc853a57 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -345,6 +345,15 @@ func Resource() *schema.Resource { Optional: true, Computed: true, }, + "config_server_management_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "config_server_type": { + Type: schema.TypeString, + Computed: true, + }, }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(3 * time.Hour), @@ -457,6 +466,9 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if v, ok := d.GetOk("redact_client_log_data"); ok { params.RedactClientLogData = conversion.Pointer(v.(bool)) } + if v, ok := d.GetOk("config_server_management_mode"); ok { + params.ConfigServerManagementMode = conversion.StringPtr(v.(string)) + } // Validate oplog_size_mb to show the error before the cluster is created. 
if oplogSizeMB, ok := d.GetOkExists("advanced_configuration.0.oplog_size_mb"); ok { @@ -563,7 +575,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) } - zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(cluster) if err != nil { return diag.FromErr(err) @@ -575,6 +586,8 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } clusterResp = convertClusterDescToLatestExcludeRepSpecs(clusterOldSDK) + clusterResp.ConfigServerManagementMode = cluster.ConfigServerManagementMode + clusterResp.ConfigServerType = cluster.ConfigServerType } else { cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { @@ -748,6 +761,13 @@ func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription2024 return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) } + if err := d.Set("config_server_type", cluster.GetConfigServerType()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_type", clusterName, err)) + } + + if err := d.Set("config_server_management_mode", cluster.GetConfigServerManagementMode()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_management_mode", clusterName, err)) + } return nil } @@ -814,6 +834,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. return diags } clusterChangeDetect := new(admin20240530.AdvancedClusterDescription) + var waitOnUpdate bool if !reflect.DeepEqual(req, clusterChangeDetect) { if err := CheckRegionConfigsPriorityOrderOld(req.GetReplicationSpecs()); err != nil { return diag.FromErr(err) @@ -821,10 +842,9 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if _, _, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } - if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { - return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) - } - } else if d.HasChange("replica_set_scaling_strategy") || d.HasChange("redact_client_log_data") { + waitOnUpdate = true + } + if d.HasChange("replica_set_scaling_strategy") || d.HasChange("redact_client_log_data") || d.HasChange("config_server_management_mode") { request := new(admin.ClusterDescription20240805) if d.HasChange("replica_set_scaling_strategy") { request.ReplicaSetScalingStrategy = conversion.Pointer(d.Get("replica_set_scaling_strategy").(string)) @@ -832,9 +852,15 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
if d.HasChange("redact_client_log_data") { request.RedactClientLogData = conversion.Pointer(d.Get("redact_client_log_data").(bool)) } + if d.HasChange("config_server_management_mode") { + request.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) + } if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } + waitOnUpdate = true + } + if waitOnUpdate { if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } @@ -992,6 +1018,9 @@ func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clust if d.HasChange("redact_client_log_data") { cluster.RedactClientLogData = conversion.Pointer(d.Get("redact_client_log_data").(bool)) } + if d.HasChange("config_server_management_mode") { + cluster.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) + } return cluster, nil } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 0be73c1911..40904c5f41 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -24,6 +24,11 @@ const ( dataSourcePluralName = "data.mongodbatlas_advanced_clusters.test" ) +var ( + configServerManagementModeFixedToDedicated = "FIXED_TO_DEDICATED" + configServerManagementModeAtlasManaged = "ATLAS_MANAGED" +) + func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) { var ( projectID = acc.ProjectIDExecution(t) @@ -142,12 +147,12 @@ func singleShardedMultiCloudTestCase(t *testing.T) resource.TestCase { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10"), - Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10", nil), + Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true, nil), }, { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10"), - Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10", nil), + Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true, nil), }, { ResourceName: resourceName, @@ -555,12 +560,12 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchema(t *testing.T) CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10"), - Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10", &configServerManagementModeFixedToDedicated), + Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false, &configServerManagementModeFixedToDedicated), }, { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20"), - Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20", &configServerManagementModeAtlasManaged), + Check: 
checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false, &configServerManagementModeAtlasManaged), }, }, }) @@ -1181,7 +1186,17 @@ func checkReplicaSetMultiCloud(name string, regionConfigs int) resource.TestChec ) } -func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string) string { +func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string, configServerManagementMode *string) string { + var rootConfig string + if configServerManagementMode != nil { + // valid values: FIXED_TO_DEDICATED or ATLAS_MANAGED (default) + // only valid for Major version 8 and later + // cluster must be SHARDED + rootConfig = fmt.Sprintf(` + mongo_db_major_version = "8" + config_server_management_mode = %[1]q + `, *configServerManagementMode) + } return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -1192,6 +1207,7 @@ func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards project_id = mongodbatlas_project.cluster_project.id name = %[3]q cluster_type = "SHARDED" + %[6]s replication_specs { num_shards = %[4]d @@ -1223,11 +1239,16 @@ func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards data "mongodbatlas_advanced_cluster" "test" { project_id = mongodbatlas_advanced_cluster.test.project_id name = mongodbatlas_advanced_cluster.test.name + depends_on = [mongodbatlas_advanced_cluster.test] + } + data "mongodbatlas_advanced_clusters" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + depends_on = [mongodbatlas_advanced_cluster.test] } - `, orgID, projectName, name, numShards, analyticsSize) + `, orgID, projectName, name, numShards, analyticsSize, rootConfig) } -func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool) resource.TestCheckFunc { +func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool, configServerManagementMode *string) resource.TestCheckFunc { additionalChecks := []resource.TestCheckFunc{ resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), @@ -1242,6 +1263,17 @@ func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize s additionalChecks, resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id")) } + if configServerManagementMode != nil { + additionalChecks = append( + additionalChecks, + resource.TestCheckResourceAttr(resourceName, "config_server_management_mode", *configServerManagementMode), + resource.TestCheckResourceAttrSet(resourceName, "config_server_type"), + resource.TestCheckResourceAttr(dataSourceName, "config_server_management_mode", *configServerManagementMode), + resource.TestCheckResourceAttrSet(dataSourceName, "config_server_type"), + resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.config_server_management_mode", *configServerManagementMode), + resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.config_server_type"), + ) + } return checkAggr( []string{"project_id", "replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"},
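As a follow-on to the plural data source block added to the test configuration, here is a minimal sketch of reading the new attributes back outside of the tests (the `var.project_id` reference and output name are assumed placeholders; the `name` key relies on the existing `results` schema):

```terraform
data "mongodbatlas_advanced_clusters" "all" {
  project_id = var.project_id # placeholder
}

# Surfaces the config server settings per cluster; both attributes are read-only here.
output "config_server_settings" {
  value = {
    for c in data.mongodbatlas_advanced_clusters.all.results :
    c.name => {
      management_mode = c.config_server_management_mode
      server_type     = c.config_server_type
    }
  }
}
```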