diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 10b95389b3..0e759eafcf 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -25,6 +25,11 @@ import ( const minVersionForChangeStreamOptions = 6.0 +type OldShardConfigMeta struct { + ID string + NumShard int +} + var ( DSTagsSchema = schema.Schema{ Type: schema.TypeSet, @@ -514,6 +519,7 @@ func flattenProcessArgs(p20240530 *admin20240530.ClusterDescriptionProcessArgs, return flattenedProcessArgs } +// This function is utilised by DS read which still uses the old API and will be removed when refactor to DS read operation is implemented. func FlattenAdvancedReplicationSpecsOldSDK(ctx context.Context, apiObjects []admin20240530.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObjects []any, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { // for flattening old model we need information of value defined at root disk_size_gb so we set the value in new location under hardware specs @@ -524,6 +530,16 @@ func FlattenAdvancedReplicationSpecsOldSDK(ctx context.Context, apiObjects []adm doesAdvancedReplicationSpecMatchAPIOldSDK, replicationSpecFlattener, connV2) } +func FlattenAdvancedReplicationSpecsOldShardingConfig(ctx context.Context, apiObjects []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecMeta map[string]OldShardConfigMeta, tfMapObjects []any, + d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { + replicationSpecFlattener := func(ctx context.Context, sdkModel *admin.ReplicationSpec20240805, tfModel map[string]any, resourceData *schema.ResourceData, client *admin.APIClient) (map[string]any, error) { + return flattenAdvancedReplicationSpecOldShardingConfig(ctx, sdkModel, zoneNameToOldReplicationSpecMeta, tfModel, resourceData, connV2) + } + 
compressedAPIObjects := compressAPIObjectList(apiObjects) + return flattenAdvancedReplicationSpecsLogic[admin.ReplicationSpec20240805](ctx, compressedAPIObjects, tfMapObjects, d, + doesAdvancedReplicationSpecMatchAPIOldShardConfig, replicationSpecFlattener, connV2) +} + func flattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObjects []any, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { // for flattening new model we need information of replication spec ids associated to old API to avoid breaking changes for users referencing replication_specs.*.id @@ -534,6 +550,30 @@ func flattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.Rep doesAdvancedReplicationSpecMatchAPI, replicationSpecFlattener, connV2) } +// compressAPIObjectList returns an array of ReplicationSpec20240805. The input array is reduced from all shards to only one shard per zoneName +func compressAPIObjectList(apiObjects []admin.ReplicationSpec20240805) []admin.ReplicationSpec20240805 { + var compressedAPIObjectList []admin.ReplicationSpec20240805 + wasZoneNameUsed := populateZoneNameMap(apiObjects) + for _, apiObject := range apiObjects { + if !wasZoneNameUsed[apiObject.GetZoneName()] { + compressedAPIObjectList = append(compressedAPIObjectList, apiObject) + wasZoneNameUsed[apiObject.GetZoneName()] = true + } + } + return compressedAPIObjectList +} + +// populateZoneNameMap returns a map of zoneNames and initializes all keys to false. 
+func populateZoneNameMap(apiObjects []admin.ReplicationSpec20240805) map[string]bool { + zoneNames := make(map[string]bool) + for _, apiObject := range apiObjects { + if _, exists := zoneNames[apiObject.GetZoneName()]; !exists { + zoneNames[apiObject.GetZoneName()] = false + } + } + return zoneNames +} + type ReplicationSpecSDKModel interface { admin20240530.ReplicationSpec | admin.ReplicationSpec20240805 } @@ -556,7 +596,6 @@ func flattenAdvancedReplicationSpecsLogic[T ReplicationSpecSDKModel]( if len(tfMapObjects) > i { tfMapObject = tfMapObjects[i].(map[string]any) } - for j := 0; j < len(apiObjects); j++ { if wasAPIObjectUsed[j] || !tfModelWithSDKMatcher(tfMapObject, &apiObjects[j]) { continue } @@ -599,10 +638,15 @@ func flattenAdvancedReplicationSpecsLogic[T ReplicationSpecSDKModel]( return tfList, nil } +// This function is utilised by DS read which still uses the old API and will be removed when refactor to DS read operation is implemented. func doesAdvancedReplicationSpecMatchAPIOldSDK(tfObject map[string]any, apiObject *admin20240530.ReplicationSpec) bool { return tfObject["id"] == apiObject.GetId() || (tfObject["id"] == nil && tfObject["zone_name"] == apiObject.GetZoneName()) } +func doesAdvancedReplicationSpecMatchAPIOldShardConfig(tfObject map[string]any, apiObject *admin.ReplicationSpec20240805) bool { + return tfObject["zone_name"] == apiObject.GetZoneName() +} + func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin.ReplicationSpec20240805) bool { return tfObject["external_id"] == apiObject.GetId() } @@ -1111,6 +1155,7 @@ func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.Replic return tfMap, nil } +// This function is utilised by DS read which still uses the old API and will be removed when refactor to DS read operation is implemented. 
func flattenAdvancedReplicationSpecOldSDK(ctx context.Context, apiObject *admin20240530.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObject map[string]any, d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { if apiObject == nil { @@ -1142,3 +1187,35 @@ func flattenAdvancedReplicationSpecOldSDK(ctx context.Context, apiObject *admin2 return tfMap, nil } + +func flattenAdvancedReplicationSpecOldShardingConfig(ctx context.Context, apiObject *admin.ReplicationSpec20240805, zoneNameToOldShardConfigMeta map[string]OldShardConfigMeta, tfMapObject map[string]any, + d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { + if apiObject == nil { + return nil, nil + } + + tfMap := map[string]any{} + if oldShardConfigData, ok := zoneNameToOldShardConfigMeta[apiObject.GetZoneName()]; ok { + tfMap["num_shards"] = oldShardConfigData.NumShard + tfMap["id"] = oldShardConfigData.ID + } + if tfMapObject != nil { + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), tfMapObject["region_configs"].([]any), d, connV2) + if err != nil { + return nil, err + } + tfMap["region_configs"] = object + tfMap["container_id"] = containerIDs + } else { + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), nil, d, connV2) + if err != nil { + return nil, err + } + tfMap["region_configs"] = object + tfMap["container_id"] = containerIDs + } + tfMap["zone_name"] = apiObject.GetZoneName() + tfMap["zone_id"] = apiObject.GetZoneId() + + return tfMap, nil +} diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go b/internal/service/advancedcluster/model_advanced_cluster_test.go index 7dd517c469..1a9cb6a12d 100644 --- a/internal/service/advancedcluster/model_advanced_cluster_test.go +++ b/internal/service/advancedcluster/model_advanced_cluster_test.go @@ -149,6 +149,137 @@ func 
TestFlattenReplicationSpecs(t *testing.T) { } } +func TestFlattenAdvancedReplicationSpecsOldShardingConfig(t *testing.T) { + var ( + regionName = "EU_WEST_1" + providerName = "AWS" + expectedID = "id1" + unexpectedID = "id2" + expectedZoneName = "z1" + unexpectedZoneName = "z2" + regionConfigAdmin = []admin.CloudRegionConfig20240805{{ + ProviderName: &providerName, + RegionName: ®ionName, + }} + regionConfigTfSameZone = map[string]any{ + "provider_name": "AWS", + "region_name": regionName, + } + regionConfigTfDiffZone = map[string]any{ + "provider_name": "AWS", + "region_name": regionName, + "zone_name": unexpectedZoneName, + } + apiSpecExpected = admin.ReplicationSpec20240805{Id: &expectedID, ZoneName: &expectedZoneName, RegionConfigs: ®ionConfigAdmin} + apiSpecDifferent = admin.ReplicationSpec20240805{Id: &unexpectedID, ZoneName: &unexpectedZoneName, RegionConfigs: ®ionConfigAdmin} + testSchema = map[string]*schema.Schema{ + "project_id": {Type: schema.TypeString}, + } + tfSameIDSameZone = map[string]any{ + "id": expectedID, + "num_shards": 1, + "region_configs": []any{regionConfigTfSameZone}, + "zone_name": expectedZoneName, + } + tfNoIDSameZone = map[string]any{ + "id": nil, + "num_shards": 1, + "region_configs": []any{regionConfigTfSameZone}, + "zone_name": expectedZoneName, + } + tfNoIDDiffZone = map[string]any{ + "id": nil, + "num_shards": 1, + "region_configs": []any{regionConfigTfDiffZone}, + "zone_name": unexpectedZoneName, + } + tfdiffIDDiffZone = map[string]any{ + "id": "unique", + "num_shards": 1, + "region_configs": []any{regionConfigTfDiffZone}, + "zone_name": unexpectedZoneName, + } + ) + testCases := map[string]struct { + adminSpecs []admin.ReplicationSpec20240805 + zoneNameToOldReplicationSpecMeta map[string]advancedcluster.OldShardConfigMeta + tfInputSpecs []any + expectedLen int + }{ + "empty admin spec should return empty list": { + []admin.ReplicationSpec20240805{}, + map[string]advancedcluster.OldShardConfigMeta{}, + []any{tfSameIDSameZone}, 
+ 0, + }, + "existing id, should match admin": { + []admin.ReplicationSpec20240805{apiSpecExpected}, + map[string]advancedcluster.OldShardConfigMeta{expectedZoneName: {expectedID, 1}}, + []any{tfSameIDSameZone}, + 1, + }, + "existing different id, should change to admin spec": { + []admin.ReplicationSpec20240805{apiSpecExpected}, + map[string]advancedcluster.OldShardConfigMeta{expectedZoneName: {expectedID, 1}}, + []any{tfdiffIDDiffZone}, + 1, + }, + "missing id, should be set when zone_name matches": { + []admin.ReplicationSpec20240805{apiSpecExpected}, + map[string]advancedcluster.OldShardConfigMeta{expectedZoneName: {expectedID, 1}}, + []any{tfNoIDSameZone}, + 1, + }, + "missing id and diff zone, should change to admin spec": { + []admin.ReplicationSpec20240805{apiSpecExpected}, + map[string]advancedcluster.OldShardConfigMeta{expectedZoneName: {expectedID, 1}}, + []any{tfNoIDDiffZone}, + 1, + }, + "existing id, should match correct api spec using `id` and extra api spec added": { + []admin.ReplicationSpec20240805{apiSpecDifferent, apiSpecExpected}, + map[string]advancedcluster.OldShardConfigMeta{unexpectedZoneName: {unexpectedID, 1}, expectedZoneName: {expectedID, 1}}, + []any{tfSameIDSameZone}, + 2, + }, + "missing id, should match correct api spec using `zone_name` and extra api spec added": { + []admin.ReplicationSpec20240805{apiSpecDifferent, apiSpecExpected}, + map[string]advancedcluster.OldShardConfigMeta{unexpectedZoneName: {unexpectedID, 1}, expectedZoneName: {expectedID, 1}}, + []any{tfNoIDSameZone}, + 2, + }, + "two matching specs should be set to api specs": { + []admin.ReplicationSpec20240805{apiSpecExpected, apiSpecDifferent}, + map[string]advancedcluster.OldShardConfigMeta{expectedZoneName: {expectedID, 1}, unexpectedZoneName: {unexpectedID, 1}}, + []any{tfSameIDSameZone, tfdiffIDDiffZone}, + 2, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + peeringAPI := mockadmin.NetworkPeeringApi{} + + 
peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, mock.Anything).Return(admin.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) + containerResult := []admin.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin.PaginatedCloudProviderContainer{Results: &containerResult}, nil, nil) + + client := &admin.APIClient{ + NetworkPeeringApi: &peeringAPI, + } + resourceData := schema.TestResourceDataRaw(t, testSchema, map[string]any{"project_id": "p1"}) + + tfOutputSpecs, err := advancedcluster.FlattenAdvancedReplicationSpecsOldShardingConfig(context.Background(), tc.adminSpecs, tc.zoneNameToOldReplicationSpecMeta, tc.tfInputSpecs, resourceData, client) + + require.NoError(t, err) + assert.Len(t, tfOutputSpecs, tc.expectedLen) + if tc.expectedLen != 0 { + assert.Equal(t, expectedID, tfOutputSpecs[0]["id"]) + assert.Equal(t, expectedZoneName, tfOutputSpecs[0]["zone_name"]) + } + }) + } +} + func TestGetDiskSizeGBFromReplicationSpec(t *testing.T) { diskSizeGBValue := 40.0 diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 2b35fcf397..6bc75d16e6 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -565,84 +565,37 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di projectID := ids["project_id"] clusterName := ids["cluster_name"] - var clusterResp *admin.ClusterDescription20240805 - var replicationSpecs []map[string]any - if isUsingOldShardingConfiguration(d) { - clusterOldSDK, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - d.SetId("") - return 
nil - } - return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) - } - if err := d.Set("disk_size_gb", clusterOldSDK.GetDiskSizeGB()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) - } - cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) - } - if err := d.Set("replica_set_scaling_strategy", cluster.GetReplicaSetScalingStrategy()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) - } - if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) + cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil } - zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(cluster) + return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + } + + if isUsingOldShardingConfiguration(d) { + zoneNameToOldReplicationSpecMeta, err := GetReplicationSpecAttributesFromOldAPI(ctx, projectID, clusterName, connV220240530.ClustersApi) if err != nil { return diag.FromErr(err) } - - replicationSpecs, err = FlattenAdvancedReplicationSpecsOldSDK(ctx, clusterOldSDK.GetReplicationSpecs(), zoneNameToZoneIDs, clusterOldSDK.GetDiskSizeGB(), d.Get("replication_specs").([]any), d, connV2) + replicationSpecs, err = FlattenAdvancedReplicationSpecsOldShardingConfig(ctx, cluster.GetReplicationSpecs(), zoneNameToOldReplicationSpecMeta, d.Get("replication_specs").([]any), d, connV2) if err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", 
clusterName, err)) } - - clusterResp = convertClusterDescToLatestExcludeRepSpecs(clusterOldSDK) - clusterResp.ConfigServerManagementMode = cluster.ConfigServerManagementMode - clusterResp.ConfigServerType = cluster.ConfigServerType } else { - cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } - return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) - } - - // root disk_size_gb defined for backwards compatibility avoiding breaking changes - if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(cluster)); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) - } - if err := d.Set("replica_set_scaling_strategy", cluster.GetReplicaSetScalingStrategy()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) - } - if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) - } - - zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) + zoneNameToOldReplicationSpecID, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) if err != nil { return diag.FromErr(err) } - - replicationSpecs, err = flattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), zoneNameToOldReplicationSpecIDs, d.Get("replication_specs").([]any), d, connV2) + replicationSpecs, err = flattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), zoneNameToOldReplicationSpecID, d.Get("replication_specs").([]any), d, connV2) if err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } - - clusterResp = cluster } - - 
diags := setRootFields(d, clusterResp, true) + diags := setRootFields(d, cluster, true) if diags.HasError() { return diags } @@ -667,15 +620,31 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return nil } +// GetReplicationSpecAttributesFromOldAPI returns the id and num shard values of replication specs coming from old API. This is used to populate replication_specs.*.id and replication_specs.*.num_shard attributes for old sharding configurations. +// In the old API, each replication spec has a 1:1 relation with each zone, so ids and num shards are stored in a struct OldShardConfigMeta and are returned in a map from zoneName to OldShardConfigMeta. +func GetReplicationSpecAttributesFromOldAPI(ctx context.Context, projectID, clusterName string, client20240530 admin20240530.ClustersApi) (map[string]OldShardConfigMeta, error) { + clusterOldAPI, _, err := client20240530.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + readErrorMsg := "error reading advanced cluster with 2023-02-01 API (%s): %s" + return nil, fmt.Errorf(readErrorMsg, clusterName, err) + } + specs := clusterOldAPI.GetReplicationSpecs() + result := make(map[string]OldShardConfigMeta, len(specs)) + for _, spec := range specs { + result[spec.GetZoneName()] = OldShardConfigMeta{spec.GetId(), spec.GetNumShards()} + } + return result, nil +} + // getReplicationSpecIDsFromOldAPI returns the id values of replication specs coming from old API. This is used to populate old replication_specs.*.id attribute avoiding breaking changes. // In the old API each replications spec has a 1:1 relation with each zone, so ids are returned in a map from zoneName to id.
func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName string, connV220240530 *admin20240530.APIClient) (map[string]string, error) { clusterOldAPI, _, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if apiError, ok := admin20240530.AsError(err); ok { if apiError.GetErrorCode() == "ASYMMETRIC_SHARD_UNSUPPORTED" { - return nil, nil // if its the case of an asymmetric shard an error is expected in old API, replication_specs.*.id attribute will not be populated + return nil, nil // If it is the case of an asymmetric shard, an error is expected in old API and replication_specs.*.id attribute will not be populated. } - readErrorMsg := "error reading advanced cluster with 2023-02-01 API (%s): %s" + readErrorMsg := "error reading advanced cluster with 2023-02-01 API (%s): %s" return nil, fmt.Errorf(readErrorMsg, clusterName, err) } specs := clusterOldAPI.GetReplicationSpecs() @@ -686,7 +655,7 @@ func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName return result, nil } -// getZoneIDsFromNewAPI returns the zone id values of replication specs coming from new API. This is used to populate zone_id when old API is called in the read. +// getZoneIDsFromNewAPI returns the zone id values of replication specs coming from new API. This is used to populate zone_id when old API is called in DS read. 
func getZoneIDsFromNewAPI(cluster *admin.ClusterDescription20240805) (map[string]string, error) { specs := cluster.GetReplicationSpecs() result := make(map[string]string, len(specs)) @@ -729,6 +698,11 @@ func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription2024 return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "create_date", clusterName, err)) } + // root disk_size_gb defined for backwards compatibility avoiding breaking changes + if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(cluster)); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) + } + if err := d.Set("encryption_at_rest_provider", cluster.GetEncryptionAtRestProvider()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "encryption_at_rest_provider", clusterName, err)) } @@ -781,6 +755,13 @@ func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription2024 return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) } + if err := d.Set("replica_set_scaling_strategy", cluster.GetReplicaSetScalingStrategy()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) + } + if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) + } + if err := d.Set("config_server_type", cluster.GetConfigServerType()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_type", clusterName, err)) } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index a6332855ae..14e474cd21 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ 
b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -2,17 +2,22 @@ package advancedcluster_test import ( "context" + "errors" "fmt" + "net/http" "os" "regexp" "strconv" "testing" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + mockadmin20240530 "go.mongodb.org/atlas-sdk/v20240530005/mockadmin" "go.mongodb.org/atlas-sdk/v20241023002/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" @@ -30,6 +35,61 @@ var ( configServerManagementModeAtlasManaged = "ATLAS_MANAGED" ) +func TestGetReplicationSpecAttributesFromOldAPI(t *testing.T) { + var ( + projectID = "11111" + clusterName = "testCluster" + ID = "111111" + numShard = 2 + zoneName = "ZoneName managed by Terraform" + ) + + testCases := map[string]struct { + mockCluster *admin20240530.AdvancedClusterDescription + mockResponse *http.Response + mockError error + expectedResult map[string]advancedcluster.OldShardConfigMeta + expectedError error + }{ + "Error in the API call": { + mockCluster: &admin20240530.AdvancedClusterDescription{}, + mockResponse: &http.Response{StatusCode: 400}, + mockError: errGeneric, + expectedError: errors.New("error reading advanced cluster with 2023-02-01 API (testCluster): generic"), + expectedResult: nil, + }, + "Successful": { + mockCluster: &admin20240530.AdvancedClusterDescription{ + ReplicationSpecs: &[]admin20240530.ReplicationSpec{ + { + NumShards: &numShard, + Id: &ID, + ZoneName: &zoneName, + }, + }, + }, + mockResponse: &http.Response{}, + mockError: nil, + expectedError: nil, + expectedResult: map[string]advancedcluster.OldShardConfigMeta{ + zoneName: {ID: ID, NumShard: numShard}, + }, + }, + } + for name, tc := range testCases { + 
t.Run(name, func(t *testing.T) { + testObject := mockadmin20240530.NewClustersApi(t) + + testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin20240530.GetClusterApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().GetClusterExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() + + result, err := advancedcluster.GetReplicationSpecAttributesFromOldAPI(context.Background(), projectID, clusterName, testObject) + assert.Equal(t, tc.expectedError, err) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) { acc.SkipIfTPFAdvancedCluster(t) var (