feat: Support disk_size_gb at region config level while also preserving backwards compatibility with root level value #2405

Merged · 10 commits · Jul 12, 2024
@@ -303,6 +303,11 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.

clusterID = clusterDescLatest.GetId()

// root disk_size_gb defined for backwards compatibility avoiding breaking changes
if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(clusterDescLatest)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err))
}

replicationSpecs, err = flattenAdvancedReplicationSpecsDS(ctx, clusterDescLatest.GetReplicationSpecs(), d, connV2)
if err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
19 changes: 18 additions & 1 deletion internal/service/advancedcluster/model_advanced_cluster.go
@@ -278,6 +278,20 @@ func IsSharedTier(instanceSize string) bool {
return instanceSize == "M0" || instanceSize == "M2" || instanceSize == "M5"
}

// GetDiskSizeGBFromReplicationSpec obtains the diskSizeGB value by looking into the electable spec of the first replication spec.
// Independent storage size scaling is not supported (CLOUDP-201331), meaning all electable/analytics/readOnly configs in all replication specs are the same.
func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20250101) float64 {
specs := cluster.GetReplicationSpecs()
if len(specs) < 1 {
return 0
}
configs := specs[0].GetRegionConfigs()
if len(configs) < 1 {
return 0
}
return configs[0].ElectableSpecs.GetDiskSizeGB()
Member:

is there always an electable spec, even if not defined in the TF file?

Member (Author):

Correct, the API always returns electableSpecs, readOnlySpecs, and analyticsSpecs even if not defined in the request. Example of readOnlySpecs response when not defined:

"readOnlySpecs": {
    "instanceSize": "M30",
    "diskIOPS": 120,
    "diskSizeGB": 32.0,
    "diskThroughput": 25,
    "nodeCount": 0
}

}

func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin20231115.ClustersApi) retry.StateRefreshFunc {
return func() (any, string, error) {
cluster, resp, err := client.GetCluster(ctx, projectID, name).Execute()
@@ -942,8 +956,11 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f
apiObject.NodeCount = conversion.Pointer(v.(int))
}

// TODO: CLOUDP-258270 once disk_size_gb schema attribute is added at this level, we will check if defined in config and prioritize over value defined at root level (deprecated and to be removed)
apiObject.DiskSizeGB = rootDiskSizeGB
// disk size gb defined in inner level will take precedence over root level.
if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 {
Member:

it's a best practice to make sure the conversion can be done, e.g. using v.(float64) with the ok param

Member (Author):

Synced offline. For resources using SDK v2 this is a common pattern, as the schema already ensures a certain type for each attribute, meaning this can be considered a safe cast.
The main con of handling the error is that it would add significant boilerplate every time we access a value from the TF model. This was solved in the Terraform Plugin Framework, as we parse the TF model into a typed struct.
cc: @EspenAlbert as you were seeing these warnings in the new linter.
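
For illustration only (not part of this PR's diff): a minimal, standalone Go sketch contrasting the direct assertion with the comma-ok form discussed in this thread. The map literal and the 60.0 value are made up for the example.

package main

import "fmt"

func main() {
	// Hypothetical flattened TF attribute map; with SDK v2 the schema already
	// guarantees that "disk_size_gb" is a float64 when it is present.
	tfMap := map[string]any{"disk_size_gb": 60.0}

	// Direct assertion: panics if the value is not a float64, which is considered
	// acceptable here because the schema enforces the attribute type.
	direct := tfMap["disk_size_gb"].(float64)
	fmt.Println("direct:", direct)

	// Comma-ok assertion: never panics, at the cost of extra boilerplate for
	// every attribute access.
	if v, ok := tfMap["disk_size_gb"].(float64); ok && v != 0 {
		fmt.Println("checked:", v)
	}
}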

apiObject.DiskSizeGB = conversion.Pointer(v.(float64))
}

return apiObject
}
41 changes: 41 additions & 0 deletions internal/service/advancedcluster/model_advanced_cluster_test.go
@@ -3,6 +3,7 @@ package advancedcluster_test
import (
"context"
"errors"
"fmt"
"net/http"
"testing"

@@ -150,6 +151,46 @@ func TestFlattenReplicationSpecs(t *testing.T) {
}
}

func TestGetDiskSizeGBFromReplicationSpec(t *testing.T) {
diskSizeGBValue := 40.0

testCases := map[string]struct {
clusterDescription admin.ClusterDescription20250101
expectedDiskSizeResult float64
}{
"cluster description with disk size gb value at electable spec": {
clusterDescription: admin.ClusterDescription20250101{
ReplicationSpecs: &[]admin.ReplicationSpec20250101{{
RegionConfigs: &[]admin.CloudRegionConfig20250101{{
ElectableSpecs: &admin.HardwareSpec20250101{
DiskSizeGB: admin.PtrFloat64(diskSizeGBValue),
},
}},
}},
},
expectedDiskSizeResult: diskSizeGBValue,
},
"cluster description with no electable spec": {
clusterDescription: admin.ClusterDescription20250101{
ReplicationSpecs: &[]admin.ReplicationSpec20250101{
{RegionConfigs: &[]admin.CloudRegionConfig20250101{{}}},
},
},
expectedDiskSizeResult: 0,
},
"cluster description with no replication spec": {
clusterDescription: admin.ClusterDescription20250101{},
expectedDiskSizeResult: 0,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
result := advancedcluster.GetDiskSizeGBFromReplicationSpec(&tc.clusterDescription)
assert.Equal(t, fmt.Sprintf("%.f", tc.expectedDiskSizeResult), fmt.Sprintf("%.f", result)) // formatting to string to avoid float comparison
})
}
}

type Result struct {
response any
error error
@@ -538,7 +538,10 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
return diag.FromErr(fmt.Errorf(errorRead, clusterName, err))
}

// TODO: CLOUDP-258270 set disk size gb at root level
// root disk_size_gb defined for backwards compatibility avoiding breaking changes
if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(cluster)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err))
}

replicationSpecs, err = flattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV2)
if err != nil {
@@ -25,7 +25,7 @@ func TestMigAdvancedCluster_singleAWSProvider(t *testing.T) {
{
ExternalProviders: mig.ExternalProviders(),
Config: config,
Check: checkSingleProvider(projectID, clusterName),
Check: checkSingleProvider(projectID, clusterName, false),
},
mig.TestStepCheckEmptyPlan(config),
},
153 changes: 127 additions & 26 deletions internal/service/advancedcluster/resource_advanced_cluster_test.go
@@ -64,7 +64,7 @@ func TestAccClusterAdvancedCluster_singleProvider(t *testing.T) {
Steps: []resource.TestStep{
{
Config: configSingleProvider(projectID, clusterName),
Check: checkSingleProvider(projectID, clusterName),
Check: checkSingleProvider(projectID, clusterName, true),
},
{
ResourceName: resourceName,
@@ -405,8 +405,8 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *tes
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
Check: checkMultiZoneWithShards(clusterName, 1, 1),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, false),
Check: checkMultiZoneWithShardsOldSchema(clusterName, 1, 1),
},
// TODO: CLOUDP-259828 updating from single sharded to using old schema should throw an error here
},
@@ -454,15 +454,15 @@ func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) {
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, true),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, true),
Check: resource.ComposeAggregateTestCheckFunc(
checkExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "true"),
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "true"),
),
},
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, false),
ExpectError: regexp.MustCompile("CANNOT_MODIFY_GLOBAL_CLUSTER_MANAGEMENT_SETTING"),
},
},
@@ -502,12 +502,33 @@ func TestAccClusterAdvancedClusterConfig_symmetricGeoShardedOldSchema(t *testing
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 2, 2, false),
Check: checkMultiZoneWithShards(clusterName, 2, 2),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 2, 2, false),
Check: checkMultiZoneWithShardsOldSchema(clusterName, 2, 2),
},
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 3, 3, false),
Check: checkMultiZoneWithShards(clusterName, 3, 3),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 3, 3, false),
Check: checkMultiZoneWithShardsOldSchema(clusterName, 3, 3),
},
},
})
}

func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtElectableLevel(t *testing.T) {
acc.SkipTestForCI(t) // TODO: CLOUDP-260154 for ensuring this use case is supported
var (
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
projectName = acc.RandomProjectName()
clusterName = acc.RandomClusterName()
)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acc.PreCheckBasic(t) },
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName),
Check: checkShardedOldSchemaDiskSizeGBElectableLevel(),
},
},
})
@@ -705,6 +726,7 @@ func configSingleProvider(projectID, name string) string {
name = %[2]q
cluster_type = "REPLICASET"
retain_backups_enabled = "true"
disk_size_gb = 60

replication_specs {
region_configs {
@@ -730,7 +752,28 @@
`, projectID, name)
}

func checkSingleProvider(projectID, name string) resource.TestCheckFunc {
func checkSingleProvider(projectID, name string, checkDiskSizeGBInnerLevel bool) resource.TestCheckFunc {
additionalChecks := []resource.TestCheckFunc{
resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
}
if checkDiskSizeGBInnerLevel {
additionalChecks = append(additionalChecks,
checkAggr([]string{}, map[string]string{
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
}),
)
}
return checkAggr(
[]string{"replication_specs.#", "replication_specs.0.region_configs.#"},
map[string]string{
"project_id": projectID,
"name": name},
resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)))
"project_id": projectID,
"disk_size_gb": "60",
"name": name},
additionalChecks...,
)
}

func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string {
@@ -1130,7 +1165,7 @@ func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p
`, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize())
}

func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
func configMultiZoneWithShardsOldSchema(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
org_id = %[1]q
@@ -1144,6 +1179,7 @@
mongo_db_major_version = "7.0"
cluster_type = "GEOSHARDED"
global_cluster_self_managed_sharding = %[6]t
disk_size_gb = 60

replication_specs {
zone_name = "zone n1"
@@ -1191,6 +1227,72 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo
`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone, selfManagedSharding)
}

func checkMultiZoneWithShardsOldSchema(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc {
return checkAggr(
[]string{"project_id"},
map[string]string{
"name": name,
"disk_size_gb": "60",
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
"replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone),
"replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone),
})
}

func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name string) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
Member:

can't we use ProjectIDExecution?

Member (Author):

Will adjust in a follow-up PR; I see we have a mix of both for the advanced_cluster tests.

org_id = %[1]q
name = %[2]q
}

resource "mongodbatlas_advanced_cluster" "test" {
project_id = mongodbatlas_project.cluster_project.id
name = %[3]q
backup_enabled = false
mongo_db_major_version = "7.0"
cluster_type = "SHARDED"

replication_specs {
num_shards = 2

region_configs {
electable_specs {
instance_size = "M10"
node_count = 3
disk_size_gb = 60
}
analytics_specs {
instance_size = "M10"
node_count = 0
disk_size_gb = 60
}
provider_name = "AWS"
priority = 7
region_name = "US_EAST_1"
}
}
}

data "mongodbatlas_advanced_cluster" "test" {
project_id = mongodbatlas_advanced_cluster.test.project_id
name = mongodbatlas_advanced_cluster.test.name
}
`, orgID, projectName, name)
}

func checkShardedOldSchemaDiskSizeGBElectableLevel() resource.TestCheckFunc {
return checkAggr(
[]string{},
map[string]string{
"replication_specs.0.num_shards": "2",
"disk_size_gb": "60",
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
})
}

func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2 string, diskIopsSpec1, diskIopsSpec2 int) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
@@ -1210,10 +1312,12 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
instance_size = %[4]q
disk_iops = %[6]d
node_count = 3
disk_size_gb = 60
}
analytics_specs {
instance_size = %[4]q
node_count = 1
disk_size_gb = 60
}
provider_name = "AWS"
priority = 7
@@ -1227,10 +1331,12 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
instance_size = %[5]q
disk_iops = %[7]d
node_count = 3
disk_size_gb = 60
}
analytics_specs {
instance_size = %[5]q
node_count = 1
disk_size_gb = 60
}
provider_name = "AWS"
priority = 7
@@ -1251,21 +1357,16 @@ func checkShardedNewSchema(instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1,
return checkAggr(
[]string{"replication_specs.0.external_id", "replication_specs.0.zone_id", "replication_specs.1.external_id", "replication_specs.1.zone_id"},
map[string]string{
"disk_size_gb": "60",
"replication_specs.#": "2",
"replication_specs.0.id": "",
"replication_specs.0.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec1,
"replication_specs.1.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec2,
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.1.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
"replication_specs.1.region_configs.0.analytics_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.electable_specs.0.disk_iops": diskIopsSpec1,
"replication_specs.1.region_configs.0.electable_specs.0.disk_iops": diskIopsSpec2,
})
}

func checkMultiZoneWithShards(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc {
return checkAggr(
[]string{"project_id"},
map[string]string{
"name": name,
"replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone),
"replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone),
})
}