From 74ce676f395012893114963ccb8bb140912f53d0 Mon Sep 17 00:00:00 2001
From: Agustin Bettati
Date: Tue, 6 Aug 2024 14:24:28 +0200
Subject: [PATCH] including test for adding/removing replication specs in the
 middle for sharded and geosharded cluster (#2479)

---
 .../resource_advanced_cluster_test.go | 168 +++++++++++++++---
 1 file changed, 144 insertions(+), 24 deletions(-)

diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go
index 4f7119f244..15817a8aa0 100644
--- a/internal/service/advancedcluster/resource_advanced_cluster_test.go
+++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go
@@ -569,7 +569,7 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtEl
     })
 }
 
-func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchema(t *testing.T) {
+func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchemaToAsymmetricAddingRemovingShard(t *testing.T) {
     var (
         orgID       = os.Getenv("MONGODB_ATLAS_ORG_ID")
         projectName = acc.RandomProjectName()
         clusterName = acc.RandomClusterName()
     )
@@ -586,8 +586,12 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchema(t *testing.T)
                 Check: checkShardedNewSchema(50, "M30", "M30", "2000", "2000", false, false),
             },
             {
-                Config: configShardedNewSchema(orgID, projectName, clusterName, 55, "M30", "M40", 2000, 2000, true),
-                Check:  checkShardedNewSchema(55, "M30", "M40", "2000", "2000", true, true),
+                Config: configShardedNewSchema(orgID, projectName, clusterName, 55, "M30", "M40", 2000, 2500, true),
+                Check:  checkShardedNewSchema(55, "M30", "M40", "2000", "2500", true, true),
+            },
+            {
+                Config: configShardedNewSchema(orgID, projectName, clusterName, 55, "M30", "M40", 2000, 2500, false), // removes middle replication spec
+                Check:  checkShardedNewSchema(55, "M30", "M40", "2000", "2500", true, false),
             },
         },
     })
@@ -613,6 +617,34 @@ func TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema(t *testing.T
     })
 }
 
+func TestAccClusterAdvancedClusterConfig_asymmetricGeoShardedNewSchemaAddingRemovingShard(t *testing.T) {
+    var (
+        orgID       = os.Getenv("MONGODB_ATLAS_ORG_ID")
+        projectName = acc.RandomProjectName()
+        clusterName = acc.RandomClusterName()
+    )
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:                 func() { acc.PreCheckBasic(t) },
+        ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+        CheckDestroy:             acc.CheckDestroyCluster,
+        Steps: []resource.TestStep{
+            {
+                Config: configGeoShardedNewSchema(orgID, projectName, clusterName, false),
+                Check:  checkGeoShardedNewSchema(false),
+            },
+            {
+                Config: configGeoShardedNewSchema(orgID, projectName, clusterName, true),
+                Check:  checkGeoShardedNewSchema(true),
+            },
+            {
+                Config: configGeoShardedNewSchema(orgID, projectName, clusterName, false),
+                Check:  checkGeoShardedNewSchema(false),
+            },
+        },
+    })
+}
+
 func TestAccClusterAdvancedClusterConfig_shardedTransitionFromOldToNewSchema(t *testing.T) {
     var (
         orgID       = os.Getenv("MONGODB_ATLAS_ORG_ID")
         projectName = acc.RandomProjectName()
         clusterName = acc.RandomClusterName()
     )
@@ -1426,28 +1458,28 @@ func checkShardedOldSchemaDiskSizeGBElectableLevel(diskSizeGB int) resource.Test
     })
 }
 
-func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, instanceSizeSpec1, instanceSizeSpec2 string, diskIopsSpec1, diskIopsSpec2 int, includeThirdSpec bool) string {
+func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, firstInstanceSize, lastInstanceSize string, firstDiskIops, lastDiskIops int, includeMiddleSpec bool) string {
     var thirdReplicationSpec string
-    if includeThirdSpec {
+    if includeMiddleSpec {
         thirdReplicationSpec = fmt.Sprintf(`
             replication_specs {
                 region_configs {
                     electable_specs {
                         instance_size = %[1]q
                         node_count    = 3
-                        disk_size_gb  = %[3]d
+                        disk_size_gb  = %[2]d
                     }
                     analytics_specs {
                         instance_size = %[1]q
                         node_count    = 1
-                        disk_size_gb  = %[3]d
+                        disk_size_gb  = %[2]d
                     }
                     provider_name = "AWS"
                     priority      = 7
                     region_name   = "EU_WEST_1"
                 }
             }
-        `, instanceSizeSpec1, diskIopsSpec1, diskSizeGB)
+        `, firstInstanceSize, diskSizeGB)
     }
     return fmt.Sprintf(`
     resource "mongodbatlas_project" "cluster_project" {
@@ -1481,6 +1513,8 @@ func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, ins
             }
         }
 
+        %[8]s
+
         replication_specs {
             region_configs {
                 electable_specs {
@@ -1500,8 +1534,6 @@ func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, ins
                 region_name   = "EU_WEST_1"
             }
         }
-
-        %[8]s
     }
 
     data "mongodbatlas_advanced_cluster" "test" {
@@ -1514,27 +1546,31 @@ func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, ins
         project_id = mongodbatlas_advanced_cluster.test.project_id
         use_replication_spec_per_shard = true
     }
-    `, orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1, diskIopsSpec2, thirdReplicationSpec, diskSizeGB)
+    `, orgID, projectName, name, firstInstanceSize, lastInstanceSize, firstDiskIops, lastDiskIops, thirdReplicationSpec, diskSizeGB)
 }
 
-func checkShardedNewSchema(diskSizeGB int, instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1, diskIopsSpec2 string, isAsymmetricCluster, includesThirdSpec bool) resource.TestCheckFunc {
-    var amtOfReplicationSpecs int
-    if includesThirdSpec {
+func checkShardedNewSchema(diskSizeGB int, firstInstanceSize, lastInstanceSize, firstDiskIops, lastDiskIops string, isAsymmetricCluster, includeMiddleSpec bool) resource.TestCheckFunc {
+    amtOfReplicationSpecs := 2
+    if includeMiddleSpec {
         amtOfReplicationSpecs = 3
-    } else {
-        amtOfReplicationSpecs = 2
     }
+
+    lastSpecIndex := 1
+    if includeMiddleSpec {
+        lastSpecIndex = 2
+    }
+
     clusterChecks := map[string]string{
         "disk_size_gb":        fmt.Sprintf("%d", diskSizeGB),
         "replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs),
-        "replication_specs.0.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec1,
-        "replication_specs.1.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec2,
-        "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
-        "replication_specs.1.region_configs.0.electable_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
-        "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
-        "replication_specs.1.region_configs.0.analytics_specs.0.disk_size_gb":  fmt.Sprintf("%d", diskSizeGB),
-        "replication_specs.0.region_configs.0.electable_specs.0.disk_iops":     diskIopsSpec1,
-        "replication_specs.1.region_configs.0.electable_specs.0.disk_iops":     diskIopsSpec2,
+        "replication_specs.0.region_configs.0.electable_specs.0.instance_size": firstInstanceSize,
+        fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.instance_size", lastSpecIndex): lastInstanceSize,
+        "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
+        fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.disk_size_gb", lastSpecIndex): fmt.Sprintf("%d", diskSizeGB),
+        "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB),
+        fmt.Sprintf("replication_specs.%d.region_configs.0.analytics_specs.0.disk_size_gb", lastSpecIndex): fmt.Sprintf("%d", diskSizeGB),
+        "replication_specs.0.region_configs.0.electable_specs.0.disk_iops": firstDiskIops,
+        fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.disk_iops", lastSpecIndex): lastDiskIops,
     }
 
     // plural data source checks
@@ -1564,6 +1600,90 @@ func checkShardedNewSchema(diskSizeGB int, instanceSizeSpec1, instanceSizeSpec2,
     )
 }
 
+func configGeoShardedNewSchema(orgID, projectName, name string, includeThirdShardInFirstZone bool) string {
+    var thirdReplicationSpec string
+    if includeThirdShardInFirstZone {
+        thirdReplicationSpec = `
+            replication_specs {
+                zone_name = "zone n1"
+                region_configs {
+                    electable_specs {
+                        instance_size = "M10"
+                        node_count    = 3
+                    }
+                    provider_name = "AWS"
+                    priority      = 7
+                    region_name   = "US_EAST_1"
+                }
+            }
+        `
+    }
+    return fmt.Sprintf(`
+    resource "mongodbatlas_project" "cluster_project" {
+        org_id = %[1]q
+        name   = %[2]q
+    }
+    resource "mongodbatlas_advanced_cluster" "test" {
+        project_id = mongodbatlas_project.cluster_project.id
+        name = %[3]q
+        backup_enabled = false
+        mongo_db_major_version = "7.0"
+        cluster_type = "GEOSHARDED"
+        replication_specs {
+            zone_name = "zone n1"
+            region_configs {
+                electable_specs {
+                    instance_size = "M10"
+                    node_count    = 3
+                }
+                provider_name = "AWS"
+                priority      = 7
+                region_name   = "US_EAST_1"
+            }
+        }
+        %[4]s
+        replication_specs {
+            zone_name = "zone n2"
+            region_configs {
+                electable_specs {
+                    instance_size = "M20"
+                    node_count    = 3
+                }
+                provider_name = "AWS"
+                priority      = 7
+                region_name   = "EU_WEST_1"
+            }
+        }
+    }
+    data "mongodbatlas_advanced_cluster" "test" {
+        project_id = mongodbatlas_advanced_cluster.test.project_id
+        name = mongodbatlas_advanced_cluster.test.name
+        use_replication_spec_per_shard = true
+    }
+    data "mongodbatlas_advanced_clusters" "test" {
+        project_id = mongodbatlas_advanced_cluster.test.project_id
+        use_replication_spec_per_shard = true
+    }
+    `, orgID, projectName, name, thirdReplicationSpec)
+}
+
+func checkGeoShardedNewSchema(includeThirdShardInFirstZone bool) resource.TestCheckFunc {
+    var amtOfReplicationSpecs int
+    if includeThirdShardInFirstZone {
+        amtOfReplicationSpecs = 3
+    } else {
+        amtOfReplicationSpecs = 2
+    }
+    clusterChecks := map[string]string{
+        "replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs),
+    }
+
+    return checkAggr(
+        []string{},
+        clusterChecks,
+    )
+}
+
 func configShardedTransitionOldToNewSchema(orgID, projectName, name string, useNewSchema bool) string {
     var numShardsStr string
     if !useNewSchema {
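
For reference, a minimal standalone sketch (not part of the patch) of the index shift that the updated checkShardedNewSchema accounts for: with the middle replication spec present there are three specs and the "last" one sits at index 2; once the middle spec is removed the last one sits at index 1. The lastSpecKeys helper below is hypothetical and only illustrates how the checked state-attribute keys are built; run it with `go run`.

// Illustration only: shows how the last-spec state-attribute keys shift when the
// middle replication spec is added or removed. Not part of the patched test file.
package main

import "fmt"

// lastSpecKeys is a hypothetical helper; the attribute paths mirror the ones
// asserted in checkShardedNewSchema.
func lastSpecKeys(includeMiddleSpec bool) []string {
	// Three replication specs (indexes 0..2) when the middle spec is present,
	// otherwise two (indexes 0..1).
	lastSpecIndex := 1
	if includeMiddleSpec {
		lastSpecIndex = 2
	}
	return []string{
		fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.instance_size", lastSpecIndex),
		fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.disk_iops", lastSpecIndex),
	}
}

func main() {
	for _, includeMiddleSpec := range []bool{true, false} {
		fmt.Println("includeMiddleSpec =", includeMiddleSpec)
		for _, k := range lastSpecKeys(includeMiddleSpec) {
			fmt.Println("  ", k)
		}
	}
}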