diff --git a/docs/resources/advanced_cluster.md b/docs/resources/advanced_cluster.md
index 68c0c2f8d2..8cccdd9280 100644
--- a/docs/resources/advanced_cluster.md
+++ b/docs/resources/advanced_cluster.md
@@ -565,7 +565,7 @@ If you are upgrading a replica set to a sharded cluster, you cannot increase the
 * `STANDARD` volume types can't exceed the default IOPS rate for the selected volume size.
 * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size.
 * `node_count` - (Optional) Number of nodes of the given type for MongoDB Atlas to deploy to the region.
-* `disk_size_gb` - (Optional) Storage capacity that the host's root volume possesses expressed in gigabytes. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using disk_size_gb with Standard IOPS could lead to errors and configuration issues. Therefore, it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the disk_size_gb parameter specifies the storage capacity, but the IOPS are set independently. Ensuring that disk_size_gb is used exclusively with Provisioned IOPS will help avoid these issues.
+* `disk_size_gb` - (Optional) Storage capacity, expressed in gigabytes, of the host's root volume. This value must be equal for all shards and node types. If the specified disk size is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using `disk_size_gb` with Standard IOPS can lead to errors and configuration issues, so it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the `disk_size_gb` parameter specifies the storage capacity, but the IOPS are set independently. Using `disk_size_gb` exclusively with Provisioned IOPS helps avoid these issues.
 
 ### analytics_specs
 
@@ -576,7 +576,7 @@ If you are upgrading a replica set to a sharded cluster, you cannot increase the
 * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size.
 * `instance_size` - (Optional) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size.
 * `node_count` - (Optional) Number of nodes of the given type for MongoDB Atlas to deploy to the region.
-* `disk_size_gb` - (Optional) Storage capacity that the host's root volume possesses expressed in gigabytes. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using disk_size_gb with Standard IOPS could lead to errors and configuration issues. Therefore, it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the disk_size_gb parameter specifies the storage capacity, but the IOPS are set independently. Ensuring that disk_size_gb is used exclusively with Provisioned IOPS will help avoid these issues.
+* `disk_size_gb` - (Optional) Storage capacity, expressed in gigabytes, of the host's root volume. This value must be equal for all shards and node types. If the specified disk size is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using `disk_size_gb` with Standard IOPS can lead to errors and configuration issues, so it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the `disk_size_gb` parameter specifies the storage capacity, but the IOPS are set independently. Using `disk_size_gb` exclusively with Provisioned IOPS helps avoid these issues.
 
 ### read_only_specs
 
@@ -586,7 +586,7 @@ If you are upgrading a replica set to a sharded cluster, you cannot increase the
 * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size.
 * `instance_size` - (Optional) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size.
 * `node_count` - (Optional) Number of nodes of the given type for MongoDB Atlas to deploy to the region.
-* `disk_size_gb` - (Optional) Storage capacity that the host's root volume possesses expressed in gigabytes. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using disk_size_gb with Standard IOPS could lead to errors and configuration issues. Therefore, it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the disk_size_gb parameter specifies the storage capacity, but the IOPS are set independently. Ensuring that disk_size_gb is used exclusively with Provisioned IOPS will help avoid these issues.
+* `disk_size_gb` - (Optional) Storage capacity, expressed in gigabytes, of the host's root volume. This value must be equal for all shards and node types. If the specified disk size is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using `disk_size_gb` with Standard IOPS can lead to errors and configuration issues, so it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the `disk_size_gb` parameter specifies the storage capacity, but the IOPS are set independently. Using `disk_size_gb` exclusively with Provisioned IOPS helps avoid these issues.
 
 ### auto_scaling
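The sizing rules documented above (10 GB floor, 50x-RAM ceiling) are easy to misread, so here is a minimal Go sketch of the arithmetic. The RAM figures in the map are made-up placeholders for illustration, not real Atlas tier specs:

```go
// Sketch only: the documented disk-sizing rules, with assumed RAM values.
package main

import "fmt"

// maxRAMGB maps instance sizes to maximum RAM; values are hypothetical.
var maxRAMGB = map[string]float64{"M10": 2, "M30": 8, "M40": 16}

func effectiveDiskSizeGB(instanceSize string, requested float64) (float64, error) {
	const minDiskGB = 10.0
	if requested < minDiskGB {
		requested = minDiskGB // Atlas falls back to the minimum instead of failing
	}
	if limit := 50 * maxRAMGB[instanceSize]; requested > limit {
		return 0, fmt.Errorf("%.0f GB exceeds the 50x RAM cap of %.0f GB for %s", requested, limit, instanceSize)
	}
	return requested, nil
}

func main() {
	fmt.Println(effectiveDiskSizeGB("M10", 60))  // 60 <nil>
	fmt.Println(effectiveDiskSizeGB("M10", 150)) // error: exceeds the assumed 100 GB cap
}
```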
diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go
index 5db7273cf1..468b35d144 100644
--- a/internal/service/advancedcluster/model_advanced_cluster.go
+++ b/internal/service/advancedcluster/model_advanced_cluster.go
@@ -296,7 +296,7 @@ func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20250101)
 	return configs[0].ElectableSpecs.GetDiskSizeGB()
 }
 
-func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin20231115.ClustersApi) retry.StateRefreshFunc {
+func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin.ClustersApi) retry.StateRefreshFunc {
 	return func() (any, string, error) {
 		cluster, resp, err := client.GetCluster(ctx, projectID, name).Execute()
 
@@ -888,7 +888,9 @@ func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64
 		ZoneName:      conversion.StringPtr(tfMap["zone_name"].(string)),
 		RegionConfigs: expandRegionConfigs(tfMap["region_configs"].([]any), rootDiskSizeGB),
 	}
-	// TODO: CLOUDP-259836 here we will populate id value using external_id value from the state (relevant for update request)
+	if tfMap["external_id"].(string) != "" {
+		apiObject.Id = conversion.StringPtr(tfMap["external_id"].(string))
+	}
 	return apiObject
 }
 
@@ -969,12 +971,15 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f
 		apiObject.NodeCount = conversion.Pointer(v.(int))
 	}
 
-	apiObject.DiskSizeGB = rootDiskSizeGB
-	// disk size gb defined in inner level will take precedence over root level.
 	if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 {
 		apiObject.DiskSizeGB = conversion.Pointer(v.(float64))
 	}
+	// The root-level value is non-nil only when it was defined at create time or has changed in an update, so it takes precedence here.
+	if rootDiskSizeGB != nil {
+		apiObject.DiskSizeGB = rootDiskSizeGB
+	}
+
 	return apiObject
 }
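The new precedence in `expandRegionConfigSpec` is worth spelling out: the inner-level `disk_size_gb` is applied first, and the root-level value overwrites it whenever it is non-nil, which per the comment above only happens when it was set at create time or changed in an update. A standalone sketch of that resolution order, with simplified stand-in names:

```go
// Sketch of the resolution order applied above (names simplified).
package main

import "fmt"

func resolveDiskSizeGB(inner float64, root *float64) *float64 {
	var result *float64
	if inner != 0 {
		result = &inner // inner-level value, if configured
	}
	if root != nil {
		result = root // root-level value wins when present
	}
	return result
}

func main() {
	root := 55.0
	fmt.Println(*resolveDiskSizeGB(60, &root)) // 55: root overrides inner
	fmt.Println(*resolveDiskSizeGB(60, nil))   // 60: inner only
}
```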
diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go b/internal/service/advancedcluster/model_advanced_cluster_test.go
index ebb0668da0..c49243ff11 100644
--- a/internal/service/advancedcluster/model_advanced_cluster_test.go
+++ b/internal/service/advancedcluster/model_advanced_cluster_test.go
@@ -8,7 +8,6 @@ import (
 	"testing"
 
 	admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin"
-	mockadmin20231115 "go.mongodb.org/atlas-sdk/v20231115014/mockadmin"
 	"go.mongodb.org/atlas-sdk/v20240530002/admin"
 	"go.mongodb.org/atlas-sdk/v20240530002/mockadmin"
 
@@ -199,7 +198,7 @@ type Result struct {
 func TestUpgradeRefreshFunc(t *testing.T) {
 	testCases := []struct {
-		mockCluster    *admin20231115.AdvancedClusterDescription
+		mockCluster    *admin.ClusterDescription20250101
 		mockResponse   *http.Response
 		expectedResult Result
 		mockError      error
@@ -261,11 +260,11 @@ func TestUpgradeRefreshFunc(t *testing.T) {
 		},
 		{
 			name:          "Successful",
-			mockCluster:   &admin20231115.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")},
+			mockCluster:   &admin.ClusterDescription20250101{StateName: conversion.StringPtr("stateName")},
 			mockResponse:  &http.Response{StatusCode: 200},
 			expectedError: false,
 			expectedResult: Result{
-				response: &admin20231115.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")},
+				response: &admin.ClusterDescription20250101{StateName: conversion.StringPtr("stateName")},
 				state:    "stateName",
 				error:    nil,
 			},
@@ -274,9 +273,9 @@ func TestUpgradeRefreshFunc(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			testObject := mockadmin20231115.NewClustersApi(t)
+			testObject := mockadmin.NewClustersApi(t)
 
-			testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin20231115.GetClusterApiRequest{ApiService: testObject}).Once()
+			testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin.GetClusterApiRequest{ApiService: testObject}).Once()
 			testObject.EXPECT().GetClusterExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once()
 
 			result, stateName, err := advancedcluster.UpgradeRefreshFunc(context.Background(), dummyClusterName, dummyProjectID, testObject)()
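The next file removes the `upgradeRequestCtxKey` plumbing: instead of smuggling the upgrade request through `context.WithValue` and type-asserting it back out, `resourceUpgrade` now receives it as an explicit argument. A simplified before/after sketch of that pattern, with invented types for illustration:

```go
// Sketch only: context-value passing vs. an explicit parameter.
package main

import (
	"context"
	"fmt"
)

type upgradeRequest struct{ instanceSize string }

type ctxKey string

// Before: the request rides along in the context and must be type-asserted
// back out, which the compiler cannot verify.
func upgradeViaCtx(ctx context.Context) {
	req, _ := ctx.Value(ctxKey("upgradeRequest")).(*upgradeRequest)
	fmt.Println(req.instanceSize)
}

// After: the dependency is visible in the signature and checked at compile time.
func upgradeExplicit(_ context.Context, req *upgradeRequest) {
	fmt.Println(req.instanceSize)
}

func main() {
	req := &upgradeRequest{instanceSize: "M10"}
	upgradeViaCtx(context.WithValue(context.Background(), ctxKey("upgradeRequest"), req))
	upgradeExplicit(context.Background(), req)
}
```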
diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go
index 62bbbd81fa..d32e0eb2ca 100644
--- a/internal/service/advancedcluster/resource_advanced_cluster.go
+++ b/internal/service/advancedcluster/resource_advanced_cluster.go
@@ -43,9 +43,6 @@ const (
 	DeprecationOldSchemaAction = "Please refer to our examples, documentation, and 1.18.0 migration guide for more details at https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide.html.markdown"
 )
 
-type acCtxKey string
-
-var upgradeRequestCtxKey acCtxKey = "upgradeRequest"
 var DeprecationMsgOldSchema = fmt.Sprintf("%s %s", constant.DeprecationParam, DeprecationOldSchemaAction)
 
 func Resource() *schema.Resource {
@@ -476,11 +473,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 	}
 
 	if v := d.Get("paused").(bool); v {
-		request := &admin20231115.AdvancedClusterDescription{
+		request := &admin.ClusterDescription20250101{
 			Paused: conversion.Pointer(v),
 		}
-		_, _, err = updateAdvancedCluster(ctx, connV220231115, connV2, request, projectID, d.Get("name").(string), timeout)
-		if err != nil {
+		if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, d.Get("name").(string), request).Execute(); err != nil {
+			return diag.FromErr(fmt.Errorf(errorUpdate, d.Get("name").(string), err))
+		}
+		if err = waitForUpdateToFinish(ctx, connV2, projectID, d.Get("name").(string), timeout); err != nil {
 			return diag.FromErr(fmt.Errorf(errorUpdate, d.Get("name").(string), err))
 		}
 	}
 
@@ -708,26 +707,18 @@ func isUsingOldAPISchemaStructure(d *schema.ResourceData) bool {
 
 func resourceUpdateOrUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	if upgradeRequest := getUpgradeRequest(d); upgradeRequest != nil {
-		upgradeCtx := context.WithValue(ctx, upgradeRequestCtxKey, upgradeRequest)
-		return resourceUpgrade(upgradeCtx, d, meta)
+		return resourceUpgrade(ctx, upgradeRequest, d, meta)
 	}
-
 	return resourceUpdate(ctx, d, meta)
 }
 
-func resourceUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115
+func resourceUpgrade(ctx context.Context, upgradeRequest *admin.LegacyAtlasTenantClusterUpgradeRequest, d *schema.ResourceData, meta any) diag.Diagnostics {
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
 	projectID := ids["project_id"]
 	clusterName := ids["cluster_name"]
 
-	upgradeRequest := ctx.Value(upgradeRequestCtxKey).(*admin20231115.LegacyAtlasTenantClusterUpgradeRequest)
-
-	if upgradeRequest == nil {
-		return diag.FromErr(fmt.Errorf("upgrade called without %s in ctx", string(upgradeRequestCtxKey)))
-	}
-
-	upgradeResponse, _, err := upgradeCluster(ctx, connV220231115, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate))
+	upgradeResponse, _, err := upgradeCluster(ctx, connV2, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate))
 
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
@@ -753,39 +744,95 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 		return diag.FromErr(fmt.Errorf("%s: %s", ErrorOperationNotPermitted, err))
 	}
 
-	cluster := new(admin20231115.AdvancedClusterDescription)
-	clusterChangeDetect := new(admin20231115.AdvancedClusterDescription)
+	timeout := d.Timeout(schema.TimeoutUpdate)
+
+	if isUsingOldAPISchemaStructure(d) {
+		req, diags := updateRequestOldAPI(d, clusterName)
+		if diags != nil {
+			return diags
+		}
+		clusterChangeDetect := new(admin20231115.AdvancedClusterDescription)
+		if !reflect.DeepEqual(req, clusterChangeDetect) {
+			if _, _, err := connV220231115.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil {
+				return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+			}
+			if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil {
+				return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+			}
+		}
+	} else {
+		req, diags := updateRequest(d, clusterName)
+		if diags != nil {
+			return diags
+		}
+		clusterChangeDetect := new(admin.ClusterDescription20250101)
+		if !reflect.DeepEqual(req, clusterChangeDetect) {
+			if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil {
+				return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+			}
+			if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil {
+				return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+			}
+		}
+	}
+
+	if d.HasChange("advanced_configuration") {
+		ac := d.Get("advanced_configuration")
+		if aclist, ok := ac.([]any); ok && len(aclist) > 0 {
+			params := expandProcessArgs(d, aclist[0].(map[string]any))
+			if !reflect.DeepEqual(params, admin20231115.ClusterDescriptionProcessArgs{}) {
+				_, _, err := connV220231115.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, &params).Execute()
+				if err != nil {
+					return diag.FromErr(fmt.Errorf(errorConfigUpdate, clusterName, err))
+				}
+			}
+		}
+	}
+
+	if d.Get("paused").(bool) {
+		clusterRequest := &admin.ClusterDescription20250101{
+			Paused: conversion.Pointer(true),
+		}
+		if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, clusterRequest).Execute(); err != nil {
+			return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+		}
+		if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil {
+			return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+		}
+	}
+
+	return resourceRead(ctx, d, meta)
+}
+
+func updateRequest(d *schema.ResourceData, clusterName string) (*admin.ClusterDescription20250101, diag.Diagnostics) {
+	cluster := new(admin.ClusterDescription20250101)
 
 	if d.HasChange("backup_enabled") {
 		cluster.BackupEnabled = conversion.Pointer(d.Get("backup_enabled").(bool))
 	}
 
 	if d.HasChange("bi_connector_config") {
-		cluster.BiConnector = convertBiConnectToOldSDK(expandBiConnectorConfig(d))
+		cluster.BiConnector = expandBiConnectorConfig(d)
 	}
 
 	if d.HasChange("cluster_type") {
 		cluster.ClusterType = conversion.StringPtr(d.Get("cluster_type").(string))
 	}
 
-	if d.HasChange("disk_size_gb") {
-		cluster.DiskSizeGB = conversion.Pointer(d.Get("disk_size_gb").(float64))
-	}
-
 	if d.HasChange("encryption_at_rest_provider") {
 		cluster.EncryptionAtRestProvider = conversion.StringPtr(d.Get("encryption_at_rest_provider").(string))
 	}
 
 	if d.HasChange("labels") {
-		labels, err := convertLabelSliceToOldSDK(expandLabelSliceFromSetSchema(d))
+		labels, err := expandLabelSliceFromSetSchema(d)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		cluster.Labels = &labels
 	}
 
 	if d.HasChange("tags") {
-		cluster.Tags = convertTagsPtrToOldSDK(conversion.ExpandTagsFromSetSchema(d))
+		cluster.Tags = conversion.ExpandTagsFromSetSchema(d)
 	}
 
 	if d.HasChange("mongo_db_major_version") {
@@ -796,8 +843,12 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 		cluster.PitEnabled = conversion.Pointer(d.Get("pit_enabled").(bool))
 	}
 
-	if d.HasChange("replication_specs") {
-		cluster.ReplicationSpecs = expandAdvancedReplicationSpecsOldSDK(d.Get("replication_specs").([]any))
+	if d.HasChange("replication_specs") || d.HasChange("disk_size_gb") {
+		var updatedDiskSizeGB *float64
+		if d.HasChange("disk_size_gb") {
+			updatedDiskSizeGB = conversion.Pointer(d.Get("disk_size_gb").(float64))
+		}
+		cluster.ReplicationSpecs = expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any), updatedDiskSizeGB)
 	}
 
 	if d.HasChange("root_cert_type") {
@@ -820,7 +871,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 		if strTime := d.Get("accept_data_risks_and_force_replica_set_reconfig").(string); strTime != "" {
 			t, ok := conversion.StringToTime(strTime)
 			if !ok {
-				return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, "accept_data_risks_and_force_replica_set_reconfig time format is incorrect"))
+				return nil, diag.FromErr(fmt.Errorf(errorUpdate, clusterName, "accept_data_risks_and_force_replica_set_reconfig time format is incorrect"))
 			}
 			cluster.AcceptDataRisksAndForceReplicaSetReconfig = &t
 		}
@@ -829,50 +880,90 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 	if d.HasChange("paused") && !d.Get("paused").(bool) {
 		cluster.Paused = conversion.Pointer(d.Get("paused").(bool))
 	}
+	return cluster, nil
+}
 
-	timeout := d.Timeout(schema.TimeoutUpdate)
+func updateRequestOldAPI(d *schema.ResourceData, clusterName string) (*admin20231115.AdvancedClusterDescription, diag.Diagnostics) {
+	cluster := new(admin20231115.AdvancedClusterDescription)
 
-	if d.HasChange("advanced_configuration") {
-		ac := d.Get("advanced_configuration")
-		if aclist, ok := ac.([]any); ok && len(aclist) > 0 {
-			params := expandProcessArgs(d, aclist[0].(map[string]any))
-			if !reflect.DeepEqual(params, admin20231115.ClusterDescriptionProcessArgs{}) {
-				_, _, err := connV220231115.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, &params).Execute()
-				if err != nil {
-					return diag.FromErr(fmt.Errorf(errorConfigUpdate, clusterName, err))
-				}
-			}
-		}
+	if d.HasChange("backup_enabled") {
+		cluster.BackupEnabled = conversion.Pointer(d.Get("backup_enabled").(bool))
 	}
 
-	// Has changes
-	if !reflect.DeepEqual(cluster, clusterChangeDetect) {
-		err := retry.RetryContext(ctx, timeout, func() *retry.RetryError {
-			_, resp, err := updateAdvancedCluster(ctx, connV220231115, connV2, cluster, projectID, clusterName, timeout)
-			if err != nil {
-				if resp == nil || resp.StatusCode == 400 {
-					return retry.NonRetryableError(fmt.Errorf(errorUpdate, clusterName, err))
-				}
-				return retry.RetryableError(fmt.Errorf(errorUpdate, clusterName, err))
-			}
-			return nil
-		})
+	if d.HasChange("bi_connector_config") {
+		cluster.BiConnector = convertBiConnectToOldSDK(expandBiConnectorConfig(d))
+	}
+
+	if d.HasChange("cluster_type") {
+		cluster.ClusterType = conversion.StringPtr(d.Get("cluster_type").(string))
+	}
+
+	if d.HasChange("disk_size_gb") {
+		cluster.DiskSizeGB = conversion.Pointer(d.Get("disk_size_gb").(float64))
+	}
+
+	if changedValue := obtainChangeForDiskSizeGBInFirstRegion(d); changedValue != nil {
+		cluster.DiskSizeGB = changedValue
+	}
+
+	if d.HasChange("encryption_at_rest_provider") {
+		cluster.EncryptionAtRestProvider = conversion.StringPtr(d.Get("encryption_at_rest_provider").(string))
+	}
+
+	if d.HasChange("labels") {
+		labels, err := convertLabelSliceToOldSDK(expandLabelSliceFromSetSchema(d))
 		if err != nil {
-			return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+			return nil, err
 		}
+		cluster.Labels = &labels
 	}
 
-	if d.Get("paused").(bool) {
-		clusterRequest := &admin20231115.AdvancedClusterDescription{
-			Paused: conversion.Pointer(true),
-		}
-		_, _, err := updateAdvancedCluster(ctx, connV220231115, connV2, clusterRequest, projectID, clusterName, timeout)
-		if err != nil {
-			return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err))
+	if d.HasChange("tags") {
+		cluster.Tags = convertTagsPtrToOldSDK(conversion.ExpandTagsFromSetSchema(d))
+	}
+
+	if d.HasChange("mongo_db_major_version") {
+		cluster.MongoDBMajorVersion = conversion.StringPtr(FormatMongoDBMajorVersion(d.Get("mongo_db_major_version")))
+	}
+
+	if d.HasChange("pit_enabled") {
+		cluster.PitEnabled = conversion.Pointer(d.Get("pit_enabled").(bool))
+	}
+
+	if d.HasChange("replication_specs") {
+		cluster.ReplicationSpecs = expandAdvancedReplicationSpecsOldSDK(d.Get("replication_specs").([]any))
+	}
+
+	if d.HasChange("root_cert_type") {
+		cluster.RootCertType = conversion.StringPtr(d.Get("root_cert_type").(string))
+	}
+
+	if d.HasChange("termination_protection_enabled") {
+		cluster.TerminationProtectionEnabled = conversion.Pointer(d.Get("termination_protection_enabled").(bool))
+	}
+
+	if d.HasChange("version_release_system") {
+		cluster.VersionReleaseSystem = conversion.StringPtr(d.Get("version_release_system").(string))
+	}
+
+	if d.HasChange("global_cluster_self_managed_sharding") {
+		cluster.GlobalClusterSelfManagedSharding = conversion.Pointer(d.Get("global_cluster_self_managed_sharding").(bool))
+	}
+
+	if d.HasChange("accept_data_risks_and_force_replica_set_reconfig") {
+		if strTime := d.Get("accept_data_risks_and_force_replica_set_reconfig").(string); strTime != "" {
+			t, ok := conversion.StringToTime(strTime)
+			if !ok {
+				return nil, diag.FromErr(fmt.Errorf(errorUpdate, clusterName, "accept_data_risks_and_force_replica_set_reconfig time format is incorrect"))
+			}
+			cluster.AcceptDataRisksAndForceReplicaSetReconfig = &t
 		}
 	}
 
-	return resourceRead(ctx, d, meta)
+	if d.HasChange("paused") && !d.Get("paused").(bool) {
+		cluster.Paused = conversion.Pointer(d.Get("paused").(bool))
+	}
+	return cluster, nil
 }
 
 func isUpdateAllowed(d *schema.ResourceData) (bool, error) {
@@ -904,6 +995,24 @@ func checkNewSchemaCompatibility(specs []any) bool {
 	return true
 }
 
+// When the legacy schema structure is used, we invoke the old API for updates. That API sends diskSizeGB at the root level.
+// This function detects whether a change was made at the inner spec levels. It assumes that all inner-level disk_size_gb values are equal, so it only inspects the first region config.
+func obtainChangeForDiskSizeGBInFirstRegion(d *schema.ResourceData) *float64 {
+	electableLocation := "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb"
+	readOnlyLocation := "replication_specs.0.region_configs.0.read_only_specs.0.disk_size_gb"
+	analyticsLocation := "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb"
+	if d.HasChange(electableLocation) {
+		return admin.PtrFloat64(d.Get(electableLocation).(float64))
+	}
+	if d.HasChange(readOnlyLocation) {
+		return admin.PtrFloat64(d.Get(readOnlyLocation).(float64))
+	}
+	if d.HasChange(analyticsLocation) {
+		return admin.PtrFloat64(d.Get(analyticsLocation).(float64))
+	}
+	return nil
+}
+
 func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
@@ -976,7 +1085,7 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s
 	return []*schema.ResourceData{d}, nil
 }
 
-func upgradeCluster(ctx context.Context, connV2 *admin20231115.APIClient, request *admin20231115.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin20231115.LegacyAtlasCluster, *http.Response, error) {
+func upgradeCluster(ctx context.Context, connV2 *admin.APIClient, request *admin.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin.LegacyAtlasCluster, *http.Response, error) {
 	request.Name = name
 
 	cluster, resp, err := connV2.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute()
@@ -1052,7 +1161,7 @@ func replicationSpecsHashSet(v any) int {
 	return schema.HashString(buf.String())
 }
 
-func getUpgradeRequest(d *schema.ResourceData) *admin20231115.LegacyAtlasTenantClusterUpgradeRequest {
+func getUpgradeRequest(d *schema.ResourceData) *admin.LegacyAtlasTenantClusterUpgradeRequest {
 	if !d.HasChange("replication_specs") {
 		return nil
 	}
@@ -1073,8 +1182,8 @@ func getUpgradeRequest(d *schema.ResourceData) *admin20231115.LegacyAtlasTenantC
 		return nil
 	}
 
-	return &admin20231115.LegacyAtlasTenantClusterUpgradeRequest{
-		ProviderSettings: &admin20231115.ClusterProviderSettings{
+	return &admin.LegacyAtlasTenantClusterUpgradeRequest{
+		ProviderSettings: &admin.ClusterProviderSettings{
 			ProviderName:     updatedRegion.GetProviderName(),
 			InstanceSizeName: updatedRegion.ElectableSpecs.InstanceSize,
 			RegionName:       updatedRegion.RegionName,
@@ -1082,19 +1191,7 @@
 	}
 }
 
-func updateAdvancedCluster(
-	ctx context.Context,
-	connV220231115 *admin20231115.APIClient,
-	connV2 *admin.APIClient,
-	request *admin20231115.AdvancedClusterDescription,
-	projectID, name string,
-	timeout time.Duration,
-) (*admin20231115.AdvancedClusterDescription, *http.Response, error) {
-	cluster, resp, err := connV220231115.ClustersApi.UpdateCluster(ctx, projectID, name, request).Execute()
-	if err != nil {
-		return nil, nil, err
-	}
-
+func waitForUpdateToFinish(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) error {
 	stateConf := &retry.StateChangeConf{
 		Pending:    []string{"CREATING", "UPDATING", "REPAIRING"},
 		Target:     []string{"IDLE"},
@@ -1104,10 +1201,6 @@ func updateAdvancedCluster(
 		Delay:      1 * time.Minute,
 	}
 
-	_, err = stateConf.WaitForStateContext(ctx)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return cluster, resp, nil
+	_, err := stateConf.WaitForStateContext(ctx)
+	return err
 }
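With `updateAdvancedCluster` gone, every caller now issues `UpdateCluster` itself and then blocks in `waitForUpdateToFinish`, which polls until the cluster leaves the CREATING/UPDATING/REPAIRING states. A dependency-free sketch of that poll-until-target shape (not the provider's actual helper, which uses `retry.StateChangeConf`):

```go
// Sketch only: poll a refresh function until a target state or timeout.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func waitForState(ctx context.Context, refresh func() (string, error), target string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		state, err := refresh()
		if err != nil {
			return err
		}
		if state == target {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for state " + target)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Millisecond): // the provider uses minute-scale delays
		}
	}
}

func main() {
	states := []string{"UPDATING", "UPDATING", "IDLE"}
	i := 0
	refresh := func() (string, error) { s := states[i]; i++; return s, nil }
	fmt.Println(waitForState(context.Background(), refresh, "IDLE", time.Second)) // <nil>
}
```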
diff --git a/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go b/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go
index 413e6666fe..d72a957aa5 100644
--- a/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go
+++ b/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go
@@ -15,7 +15,7 @@ func TestMigAdvancedCluster_replicaSetAWSProvider(t *testing.T) {
 	var (
 		projectID   = acc.ProjectIDExecution(t)
 		clusterName = acc.RandomClusterName()
-		config      = configReplicaSetAWSProvider(projectID, clusterName, 3)
+		config      = configReplicaSetAWSProvider(projectID, clusterName, 60, 3)
 	)
 
 	resource.ParallelTest(t, resource.TestCase{
@@ -25,13 +25,13 @@ func TestMigAdvancedCluster_replicaSetAWSProvider(t *testing.T) {
 			{
 				ExternalProviders: mig.ExternalProviders(),
 				Config:            config,
-				Check:             checkReplicaSetAWSProvider(projectID, clusterName, 3, false, false),
+				Check:             checkReplicaSetAWSProvider(projectID, clusterName, 60, 3, false, false),
 			},
 			mig.TestStepCheckEmptyPlan(config),
 			{
 				ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
 				Config:                   config,
-				Check:                    checkReplicaSetAWSProvider(projectID, clusterName, 3, true, true), // external_id will be present in latest version
+				Check:                    checkReplicaSetAWSProvider(projectID, clusterName, 60, 3, true, true), // external_id will be present in latest version
 			},
 		},
 	})

diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go
index eeb4b67bf6..a3198031c4 100644
--- a/internal/service/advancedcluster/resource_advanced_cluster_test.go
+++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go
@@ -66,12 +66,12 @@ func TestAccClusterAdvancedCluster_replicaSetAWSProvider(t *testing.T) {
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configReplicaSetAWSProvider(projectID, clusterName, 3),
-				Check:  checkReplicaSetAWSProvider(projectID, clusterName, 3, true, true),
+				Config: configReplicaSetAWSProvider(projectID, clusterName, 60, 3),
+				Check:  checkReplicaSetAWSProvider(projectID, clusterName, 60, 3, true, true),
 			},
 			{
-				Config: configReplicaSetAWSProvider(projectID, clusterName, 5),
-				Check:  checkReplicaSetAWSProvider(projectID, clusterName, 5, true, true),
+				Config: configReplicaSetAWSProvider(projectID, clusterName, 50, 5),
+				Check:  checkReplicaSetAWSProvider(projectID, clusterName, 50, 5, true, true),
 			},
 			{
 				ResourceName: resourceName,
@@ -524,7 +524,6 @@ func TestAccClusterAdvancedClusterConfig_symmetricGeoShardedOldSchema(t *testing
 }
 
 func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtElectableLevel(t *testing.T) {
-	acc.SkipTestForCI(t) // TODO: CLOUDP-260154 for ensuring this use case is supported
 	var (
 		orgID       = os.Getenv("MONGODB_ATLAS_ORG_ID")
 		projectName = acc.RandomProjectName()
@@ -537,8 +536,12 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtEl
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName),
-				Check:  checkShardedOldSchemaDiskSizeGBElectableLevel(),
+				Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName, 50),
+				Check:  checkShardedOldSchemaDiskSizeGBElectableLevel(50),
+			},
+			{
+				Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName, 55),
+				Check:  checkShardedOldSchemaDiskSizeGBElectableLevel(55),
 			},
 		},
 	})
@@ -557,8 +560,12 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchema(t *testing.T)
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configShardedNewSchema(orgID, projectName, clusterName, "M30", "M30", 3000, 3000),
-				Check:  checkShardedNewSchema("M30", "M30", "3000", "3000", false),
+				Config: configShardedNewSchema(orgID, projectName, clusterName, 50, "M30", "M30", 3000, 3000, false),
+				Check:  checkShardedNewSchema(50, "M30", "M30", "3000", "3000", false, false),
+			},
+			{
+				Config: configShardedNewSchema(orgID, projectName, clusterName, 55, "M30", "M40", 3000, 3000, true),
+				Check:  checkShardedNewSchema(55, "M30", "M40", "3000", "3000", true, true),
 			},
 		},
 	})
@@ -577,8 +584,8 @@ func TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema(t *testing.T
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configShardedNewSchema(orgID, projectName, clusterName, "M30", "M40", 3000, 3000), // TODO: disk iops is failing if value is different
-				Check:  checkShardedNewSchema("M30", "M40", "3000", "3000", true), // replication spec old ids not populated for asymmetric cluster
+				Config: configShardedNewSchema(orgID, projectName, clusterName, 50, "M30", "M40", 3000, 3000, false), // TODO: disk iops is failing if value is different
+				Check:  checkShardedNewSchema(50, "M30", "M40", "3000", "3000", true, false), // replication spec old ids not populated for asymmetric cluster
 			},
 		},
 	})
@@ -729,20 +736,20 @@ func checkTags(name string, tags ...map[string]string) resource.TestCheckFunc {
 		tagChecks...)
 }
 
-func configReplicaSetAWSProvider(projectID, name string, nodeCountElectable int) string {
+func configReplicaSetAWSProvider(projectID, name string, diskSizeGB, nodeCountElectable int) string {
 	return fmt.Sprintf(`
 	resource "mongodbatlas_advanced_cluster" "test" {
 		project_id             = %[1]q
 		name                   = %[2]q
 		cluster_type           = "REPLICASET"
 		retain_backups_enabled = "true"
-		disk_size_gb           = 60
+		disk_size_gb           = %[3]d
 
 		replication_specs {
 			region_configs {
 				electable_specs {
 					instance_size = "M10"
-					node_count    = %[3]d
+					node_count    = %[4]d
 				}
 				analytics_specs {
 					instance_size = "M10"
@@ -759,10 +766,10 @@ func configReplicaSetAWSProvider(projectID, name string, nodeCountElectable int)
 		project_id = mongodbatlas_advanced_cluster.test.project_id
 		name       = mongodbatlas_advanced_cluster.test.name
 	}
-	`, projectID, name, nodeCountElectable)
+	`, projectID, name, diskSizeGB, nodeCountElectable)
 }
 
-func checkReplicaSetAWSProvider(projectID, name string, nodeCountElectable int, checkDiskSizeGBInnerLevel, checkExternalID bool) resource.TestCheckFunc {
+func checkReplicaSetAWSProvider(projectID, name string, diskSizeGB, nodeCountElectable int, checkDiskSizeGBInnerLevel, checkExternalID bool) resource.TestCheckFunc {
 	additionalChecks := []resource.TestCheckFunc{
 		resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
 		resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
 	}
 	if checkDiskSizeGBInnerLevel {
 		additionalChecks = append(additionalChecks,
 			checkAggr([]string{}, map[string]string{
-				"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
-				"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
+				"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
+				"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
 			}),
 		)
 	}
@@ -785,7 +792,7 @@ func checkReplicaSetAWSProvider(projectID, name string, nodeCountElectable int,
 		[]string{"replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"},
 		map[string]string{
 			"project_id":   projectID,
-			"disk_size_gb": "60",
+			"disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
 			"replication_specs.0.region_configs.0.electable_specs.0.node_count": fmt.Sprintf("%d", nodeCountElectable),
 			"name": name},
 		additionalChecks...,
@@ -1292,7 +1299,7 @@ func checkGeoShardedOldSchema(name string, numShardsFirstZone, numShardsSecondZo
 	)
 }
 
-func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name string) string {
+func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name string, diskSizeGB int) string {
 	return fmt.Sprintf(`
 	resource "mongodbatlas_project" "cluster_project" {
 		org_id = %[1]q
@@ -1313,12 +1320,12 @@ func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name str
 				electable_specs {
 					instance_size = "M10"
 					node_count    = 3
-					disk_size_gb  = 60
+					disk_size_gb  = %[4]d
 				}
 				analytics_specs {
 					instance_size = "M10"
 					node_count    = 0
-					disk_size_gb  = 60
+					disk_size_gb  = %[4]d
 				}
 				provider_name = "AWS"
 				priority      = 7
@@ -1331,21 +1338,44 @@ func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name str
 		project_id = mongodbatlas_advanced_cluster.test.project_id
 		name       = mongodbatlas_advanced_cluster.test.name
 	}
-	`, orgID, projectName, name)
+	`, orgID, projectName, name, diskSizeGB)
 }
 
-func checkShardedOldSchemaDiskSizeGBElectableLevel() resource.TestCheckFunc {
+func checkShardedOldSchemaDiskSizeGBElectableLevel(diskSizeGB int) resource.TestCheckFunc {
 	return checkAggr(
 		[]string{},
 		map[string]string{
 			"replication_specs.0.num_shards": "2",
-			"disk_size_gb": "60",
-			"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
-			"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
+			"disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
+			"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
+			"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
 		})
 }
 
-func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2 string, diskIopsSpec1, diskIopsSpec2 int) string {
+func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, instanceSizeSpec1, instanceSizeSpec2 string, diskIopsSpec1, diskIopsSpec2 int, includeThirdSpec bool) string {
+	var thirdReplicationSpec string
+	if includeThirdSpec {
+		thirdReplicationSpec = fmt.Sprintf(`
+		replication_specs {
+			region_configs {
+				electable_specs {
+					instance_size = %[1]q
+					disk_iops     = %[2]d
+					node_count    = 3
+					disk_size_gb  = %[3]d
+				}
+				analytics_specs {
+					instance_size = %[1]q
+					node_count    = 1
+					disk_size_gb  = %[3]d
+				}
+				provider_name = "AWS"
+				priority      = 7
+				region_name   = "EU_WEST_1"
+			}
+		}
+		`, instanceSizeSpec1, diskIopsSpec1, diskSizeGB)
+	}
+
 	return fmt.Sprintf(`
 	resource "mongodbatlas_project" "cluster_project" {
 		org_id = %[1]q
@@ -1364,12 +1394,12 @@ func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, ins
 					instance_size = %[4]q
 					disk_iops     = %[6]d
 					node_count    = 3
-					disk_size_gb  = 60
+					disk_size_gb  = %[9]d
 				}
 				analytics_specs {
 					instance_size = %[4]q
 					node_count    = 1
-					disk_size_gb  = 60
+					disk_size_gb  = %[9]d
 				}
 				provider_name = "AWS"
 				priority      = 7
@@ -1383,18 +1413,20 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
 					instance_size = %[5]q
 					disk_iops     = %[7]d
 					node_count    = 3
-					disk_size_gb  = 60
+					disk_size_gb  = %[9]d
 				}
 				analytics_specs {
 					instance_size = %[5]q
 					node_count    = 1
-					disk_size_gb  = 60
+					disk_size_gb  = %[9]d
 				}
 				provider_name = "AWS"
 				priority      = 7
 				region_name   = "EU_WEST_1"
 			}
 		}
+
+		%[8]s
 	}
 
 	data "mongodbatlas_advanced_cluster" "test" {
@@ -1407,19 +1439,25 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
 		project_id                     = mongodbatlas_advanced_cluster.test.project_id
 		use_replication_spec_per_shard = true
 	}
-	`, orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1, diskIopsSpec2)
+	`, orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1, diskIopsSpec2, thirdReplicationSpec, diskSizeGB)
 }
 
-func checkShardedNewSchema(instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1, diskIopsSpec2 string, isAsymmetricCluster bool) resource.TestCheckFunc {
+func checkShardedNewSchema(diskSizeGB int, instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1, diskIopsSpec2 string, isAsymmetricCluster, includesThirdSpec bool) resource.TestCheckFunc {
+	var amtOfReplicationSpecs int
+	if includesThirdSpec {
+		amtOfReplicationSpecs = 3
+	} else {
+		amtOfReplicationSpecs = 2
+	}
 	clusterChecks := map[string]string{
-		"disk_size_gb":        "60",
-		"replication_specs.#": "2",
+		"disk_size_gb":        fmt.Sprintf("%d", diskSizeGB),
+		"replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs),
 		"replication_specs.0.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec1,
 		"replication_specs.1.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec2,
-		"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb":  "60",
-		"replication_specs.1.region_configs.0.electable_specs.0.disk_size_gb":  "60",
-		"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb":  "60",
-		"replication_specs.1.region_configs.0.analytics_specs.0.disk_size_gb":  "60",
+		"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
+		"replication_specs.1.region_configs.0.electable_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
+		"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
+		"replication_specs.1.region_configs.0.analytics_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
 		"replication_specs.0.region_configs.0.electable_specs.0.disk_iops":     diskIopsSpec1,
 		"replication_specs.1.region_configs.0.electable_specs.0.disk_iops":     diskIopsSpec2,
 	}
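A small aside on the test helpers above: the repeated `fmt.Sprintf("%d", diskSizeGB)` conversions could also be written with `strconv.Itoa`, the idiomatic int-to-string conversion in Go. Illustrative only, not part of the patch:

```go
// Sketch only: building the same check map with strconv.Itoa.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	diskSizeGB := 55
	checks := map[string]string{
		"disk_size_gb": strconv.Itoa(diskSizeGB),
		"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": strconv.Itoa(diskSizeGB),
	}
	fmt.Println(checks)
}
```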