feat: Support disk_size_gb at region config level while also preserving backwards compatibility with root level value #2405
Changes from all commits: b5050fa, 0872a72, f780530, fe3ed6f, 494901c, 603b217, 0fadcd8, 4c88908, 66ee573, 8ae8973
@@ -278,6 +278,20 @@ func IsSharedTier(instanceSize string) bool {
 	return instanceSize == "M0" || instanceSize == "M2" || instanceSize == "M5"
 }

+// GetDiskSizeGBFromReplicationSpec obtains the diskSizeGB value by looking into the electable spec of the first replication spec.
+// Independent storage size scaling is not supported (CLOUDP-201331), meaning all electable/analytics/readOnly configs in all replication specs are the same.
+func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20250101) float64 {
+	specs := cluster.GetReplicationSpecs()
+	if len(specs) < 1 {
+		return 0
+	}
+	configs := specs[0].GetRegionConfigs()
+	if len(configs) < 1 {
+		return 0
+	}
+	return configs[0].ElectableSpecs.GetDiskSizeGB()
+}
+
 func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin20231115.ClustersApi) retry.StateRefreshFunc {
 	return func() (any, string, error) {
 		cluster, resp, err := client.GetCluster(ctx, projectID, name).Execute()
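A minimal sketch of how this helper can feed the deprecated root-level attribute during a read, assuming an SDKv2-style flow; setRootDiskSizeGB, d, and connV2 are illustrative names, not code from this diff:

	// Sketch only: mirrors the GetCluster call visible in UpgradeRefreshFunc.
	func setRootDiskSizeGB(ctx context.Context, d *schema.ResourceData, connV2 *admin.APIClient, projectID, clusterName string) diag.Diagnostics {
		cluster, _, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute()
		if err != nil {
			return diag.FromErr(err)
		}
		// No independent storage scaling (CLOUDP-201331): one lookup covers
		// every spec, so it can populate the deprecated root-level attribute.
		return diag.FromErr(d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(cluster)))
	}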
@@ -942,8 +956,11 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f
 		apiObject.NodeCount = conversion.Pointer(v.(int))
 	}

-	// TODO: CLOUDP-258270 once disk_size_gb schema attribute is added at this level, we will check if defined in config and prioritize over value defined at root level (deprecated and to be removed)
 	apiObject.DiskSizeGB = rootDiskSizeGB
+	// disk size gb defined in inner level will take precedence over root level.
+	if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 {
+		apiObject.DiskSizeGB = conversion.Pointer(v.(float64))
+	}

 	return apiObject
 }

Review comment on the type assertion: it's a best practice to make sure the conversion can be done, e.g. using v.(float64) with the ok param.

Reply: Synced offline. For resources using SDK v2 this is a common pattern, as the schema already ensures a certain type for each attribute, meaning this can be considered a safe cast.
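The thread above accepts the unchecked cast because the SDKv2 schema guarantees the attribute's type. For comparison, the defensive variant the reviewer describes would look roughly like this (a sketch, not the merged code):

	apiObject.DiskSizeGB = rootDiskSizeGB // root-level value as the default
	if raw, ok := tfMap["disk_size_gb"]; ok {
		// Two-value assertion: isFloat64 guards against a panic if the map
		// ever carried a value of another type.
		if size, isFloat64 := raw.(float64); isFloat64 && size != 0 {
			apiObject.DiskSizeGB = conversion.Pointer(size) // region config level wins
		}
	}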
@@ -64,7 +64,7 @@ func TestAccClusterAdvancedCluster_singleProvider(t *testing.T) {
 		Steps: []resource.TestStep{
 			{
 				Config: configSingleProvider(projectID, clusterName),
-				Check:  checkSingleProvider(projectID, clusterName),
+				Check:  checkSingleProvider(projectID, clusterName, true),
 			},
 			{
 				ResourceName: resourceName,
@@ -405,8 +405,8 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *tes
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
-				Check:  checkMultiZoneWithShards(clusterName, 1, 1),
+				Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, false),
+				Check:  checkMultiZoneWithShardsOldSchema(clusterName, 1, 1),
 			},
 			// TODO: CLOUDP-259828 updating from single sharded to using old schema should throw an error here
 		},
@@ -454,15 +454,15 @@ func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) {
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, true),
+				Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, true),
 				Check: resource.ComposeAggregateTestCheckFunc(
 					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "true"),
 					resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "true"),
 				),
 			},
 			{
-				Config:      configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
+				Config:      configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, false),
 				ExpectError: regexp.MustCompile("CANNOT_MODIFY_GLOBAL_CLUSTER_MANAGEMENT_SETTING"),
 			},
 		},
@@ -502,12 +502,33 @@ func TestAccClusterAdvancedClusterConfig_symmetricGeoShardedOldSchema(t *testing
 		CheckDestroy: acc.CheckDestroyCluster,
 		Steps: []resource.TestStep{
 			{
-				Config: configMultiZoneWithShards(orgID, projectName, clusterName, 2, 2, false),
-				Check:  checkMultiZoneWithShards(clusterName, 2, 2),
+				Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 2, 2, false),
+				Check:  checkMultiZoneWithShardsOldSchema(clusterName, 2, 2),
 			},
 			{
-				Config: configMultiZoneWithShards(orgID, projectName, clusterName, 3, 3, false),
-				Check:  checkMultiZoneWithShards(clusterName, 3, 3),
+				Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 3, 3, false),
+				Check:  checkMultiZoneWithShardsOldSchema(clusterName, 3, 3),
 			},
 		},
 	})
 }

+func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtElectableLevel(t *testing.T) {
+	acc.SkipTestForCI(t) // TODO: CLOUDP-260154 for ensuring this use case is supported
+	var (
+		orgID       = os.Getenv("MONGODB_ATLAS_ORG_ID")
+		projectName = acc.RandomProjectName()
+		clusterName = acc.RandomClusterName()
+	)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:                 func() { acc.PreCheckBasic(t) },
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		CheckDestroy:             acc.CheckDestroyCluster,
+		Steps: []resource.TestStep{
+			{
+				Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName),
+				Check:  checkShardedOldSchemaDiskSizeGBElectableLevel(),
+			},
+		},
+	})
@@ -705,6 +726,7 @@ func configSingleProvider(projectID, name string) string {
 		name                   = %[2]q
 		cluster_type           = "REPLICASET"
 		retain_backups_enabled = "true"
+		disk_size_gb           = 60

 		replication_specs {
 			region_configs {
@@ -730,15 +752,28 @@ func configSingleProvider(projectID, name string) string {
 	`, projectID, name)
 }

-func checkSingleProvider(projectID, name string) resource.TestCheckFunc {
+func checkSingleProvider(projectID, name string, checkDiskSizeGBInnerLevel bool) resource.TestCheckFunc {
+	additionalChecks := []resource.TestCheckFunc{
+		resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
+		resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
+		resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
+	}
+	if checkDiskSizeGBInnerLevel {
+		additionalChecks = append(additionalChecks,
+			checkAggr([]string{}, map[string]string{
+				"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
+				"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
+			}),
+		)
+	}
 	return checkAggr(
 		[]string{"replication_specs.#", "replication_specs.0.region_configs.#"},
 		map[string]string{
-			"project_id": projectID,
-			"name":       name},
-		resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
-		resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
-		resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)))
+			"project_id":   projectID,
+			"disk_size_gb": "60",
+			"name":         name},
+		additionalChecks...,
+	)
 }

 func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string {
@@ -1130,7 +1165,7 @@ func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p
 	`, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize())
 }

-func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
+func configMultiZoneWithShardsOldSchema(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
 	return fmt.Sprintf(`
 		resource "mongodbatlas_project" "cluster_project" {
 			org_id = %[1]q
@@ -1144,6 +1179,7 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo
 			mongo_db_major_version               = "7.0"
 			cluster_type                         = "GEOSHARDED"
 			global_cluster_self_managed_sharding = %[6]t
+			disk_size_gb                         = 60

 			replication_specs {
 				zone_name = "zone n1"
@@ -1191,6 +1227,72 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo
 	`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone, selfManagedSharding)
 }

+func checkMultiZoneWithShardsOldSchema(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc {
+	return checkAggr(
+		[]string{"project_id"},
+		map[string]string{
+			"name":         name,
+			"disk_size_gb": "60",
+			"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
+			"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
+			"replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone),
+			"replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone),
+		})
+}
+
+func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name string) string {
+	return fmt.Sprintf(`
+		resource "mongodbatlas_project" "cluster_project" {
+			org_id = %[1]q
+			name   = %[2]q
+		}
+
+		resource "mongodbatlas_advanced_cluster" "test" {
+			project_id             = mongodbatlas_project.cluster_project.id
+			name                   = %[3]q
+			backup_enabled         = false
+			mongo_db_major_version = "7.0"
+			cluster_type           = "SHARDED"
+
+			replication_specs {
+				num_shards = 2
+
+				region_configs {
+					electable_specs {
+						instance_size = "M10"
+						node_count    = 3
+						disk_size_gb  = 60
+					}
+					analytics_specs {
+						instance_size = "M10"
+						node_count    = 0
+						disk_size_gb  = 60
+					}
+					provider_name = "AWS"
+					priority      = 7
+					region_name   = "US_EAST_1"
+				}
+			}
+		}
+
+		data "mongodbatlas_advanced_cluster" "test" {
+			project_id = mongodbatlas_advanced_cluster.test.project_id
+			name       = mongodbatlas_advanced_cluster.test.name
+		}
+	`, orgID, projectName, name)
+}
+
+func checkShardedOldSchemaDiskSizeGBElectableLevel() resource.TestCheckFunc {
+	return checkAggr(
+		[]string{},
+		map[string]string{
+			"replication_specs.0.num_shards": "2",
+			"disk_size_gb":                   "60",
+			"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
+			"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
+		})
+}
+
 func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2 string, diskIopsSpec1, diskIopsSpec2 int) string {
 	return fmt.Sprintf(`
 		resource "mongodbatlas_project" "cluster_project" {

Review comment on the mongodbatlas_project resource in configShardedOldSchemaDiskSizeGBElectableLevel: can't we use ProjectIDExecution?

Reply: Will adjust in a follow-up PR; I see we have a mix of both for the advanced_cluster tests.
@@ -1210,10 +1312,12 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
 					instance_size = %[4]q
 					disk_iops     = %[6]d
 					node_count    = 3
+					disk_size_gb  = 60
 				}
 				analytics_specs {
 					instance_size = %[4]q
 					node_count    = 1
+					disk_size_gb  = 60
 				}
 				provider_name = "AWS"
 				priority      = 7
@@ -1227,10 +1331,12 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
 					instance_size = %[5]q
 					disk_iops     = %[7]d
 					node_count    = 3
+					disk_size_gb  = 60
 				}
 				analytics_specs {
 					instance_size = %[5]q
 					node_count    = 1
+					disk_size_gb  = 60
 				}
 				provider_name = "AWS"
 				priority      = 7
@@ -1251,21 +1357,16 @@ func checkShardedNewSchema(instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1,
 	return checkAggr(
 		[]string{"replication_specs.0.external_id", "replication_specs.0.zone_id", "replication_specs.1.external_id", "replication_specs.1.zone_id"},
 		map[string]string{
+			"disk_size_gb":           "60",
 			"replication_specs.#":    "2",
 			"replication_specs.0.id": "",
 			"replication_specs.0.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec1,
 			"replication_specs.1.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec2,
+			"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb":  "60",
+			"replication_specs.1.region_configs.0.electable_specs.0.disk_size_gb":  "60",
+			"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb":  "60",
+			"replication_specs.1.region_configs.0.analytics_specs.0.disk_size_gb":  "60",
 			"replication_specs.0.region_configs.0.electable_specs.0.disk_iops":     diskIopsSpec1,
 			"replication_specs.1.region_configs.0.electable_specs.0.disk_iops":     diskIopsSpec2,
 		})
 }

-func checkMultiZoneWithShards(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc {
-	return checkAggr(
-		[]string{"project_id"},
-		map[string]string{
-			"name":                           name,
-			"replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone),
-			"replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone),
-		})
-}
Review comment on reading the electable spec from the API response: is there always an electable spec, even if not defined in the TF file?

Reply: Correct, the API always returns electableSpecs, readOnlySpecs, and analyticsSpecs even if not defined in the request. Example of readOnlySpecs response when not defined:
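A representative fragment of such a response (illustrative values, not taken from this PR thread): the API echoes a spec with nodeCount 0 that mirrors the hardware of the configured nodes.

	"readOnlySpecs": {
		"diskIOPS": 3000,
		"ebsVolumeType": "STANDARD",
		"instanceSize": "M10",
		"nodeCount": 0
	}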