From dfa9d4a5409ec569fbd6489f2b5fa7e8f90fd49c Mon Sep 17 00:00:00 2001 From: Melanija Cvetic Date: Wed, 20 Nov 2024 13:09:17 +0000 Subject: [PATCH 1/5] change API call version --- .../advancedcluster/model_advanced_cluster.go | 225 ++++++++++++++++-- .../model_advanced_cluster_test.go | 5 +- .../model_sdk_version_conversion.go | 16 +- .../resource_advanced_cluster.go | 25 +- .../advancedcluster/resource_update_logic.go | 13 +- .../resource_update_logic_test.go | 101 ++++---- 6 files changed, 289 insertions(+), 96 deletions(-) diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index abb32a1c8b..07bf1e770a 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -401,7 +401,20 @@ func flattenTags(tags *[]admin.ResourceTag) []map[string]string { } // CheckRegionConfigsPriorityOrder will be deleted in CLOUDP-275825 -func CheckRegionConfigsPriorityOrder(regionConfigs []admin20240805.ReplicationSpec20240805) error { +func CheckRegionConfigsPriorityOrder(regionConfigs []admin.ReplicationSpec20240805) error { + for _, spec := range regionConfigs { + configs := spec.GetRegionConfigs() + for i := 0; i < len(configs)-1; i++ { + if configs[i].GetPriority() < configs[i+1].GetPriority() { + return errors.New("priority values in region_configs must be in descending order") + } + } + } + return nil +} + +// CheckRegionConfigsPriorityOrder will be deleted in CLOUDP-275825 +func CheckRegionConfigsPriorityOrder20240805(regionConfigs []admin20240805.ReplicationSpec20240805) error { for _, spec := range regionConfigs { configs := spec.GetRegionConfigs() for i := 0; i < len(configs)-1; i++ { @@ -473,7 +486,20 @@ func flattenBiConnectorConfig(biConnector *admin.BiConnector) []map[string]any { } } -func expandBiConnectorConfig(d *schema.ResourceData) *admin20240805.BiConnector { +func expandBiConnectorConfig(d 
*schema.ResourceData) *admin.BiConnector { + if v, ok := d.GetOk("bi_connector_config"); ok { + if biConn := v.([]any); len(biConn) > 0 { + biConnMap := biConn[0].(map[string]any) + return &admin.BiConnector{ + Enabled: conversion.Pointer(cast.ToBool(biConnMap["enabled"])), + ReadPreference: conversion.StringPtr(cast.ToString(biConnMap["read_preference"])), + } + } + } + return nil +} + +func expandBiConnectorConfig20240805(d *schema.ResourceData) *admin20240805.BiConnector { if v, ok := d.GetOk("bi_connector_config"); ok { if biConn := v.([]any); len(biConn) > 0 { biConnMap := biConn[0].(map[string]any) @@ -695,7 +721,20 @@ func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec20240805) *admin.Dedic } } -func dedicatedHwSpecToHwSpec(apiObject *admin20240805.DedicatedHardwareSpec20240805) *admin20240805.HardwareSpec20240805 { +func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec20240805) *admin.HardwareSpec20240805 { + if apiObject == nil { + return nil + } + return &admin.HardwareSpec20240805{ + DiskSizeGB: apiObject.DiskSizeGB, + NodeCount: apiObject.NodeCount, + DiskIOPS: apiObject.DiskIOPS, + EbsVolumeType: apiObject.EbsVolumeType, + InstanceSize: apiObject.InstanceSize, + } +} + +func dedicatedHwSpecToHwSpec20240805(apiObject *admin20240805.DedicatedHardwareSpec20240805) *admin20240805.HardwareSpec20240805 { if apiObject == nil { return nil } @@ -872,7 +911,24 @@ func IsChangeStreamOptionsMinRequiredMajorVersion(input *string) bool { return value >= minVersionForChangeStreamOptions } -func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240805.ComponentLabel, diag.Diagnostics) { +func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin.ComponentLabel, diag.Diagnostics) { + list := d.Get("labels").(*schema.Set) + res := make([]admin.ComponentLabel, list.Len()) + for i, val := range list.List() { + v := val.(map[string]any) + key := v["key"].(string) + if key == ignoreLabel { + return nil, 
diag.FromErr(fmt.Errorf("you should not set `Infrastructure Tool` label, it is used for internal purposes")) + } + res[i] = admin.ComponentLabel{ + Key: conversion.StringPtr(key), + Value: conversion.StringPtr(v["value"].(string)), + } + } + return res, nil +} + +func expandLabelSliceFromSetSchema20240805(d *schema.ResourceData) ([]admin20240805.ComponentLabel, diag.Diagnostics) { list := d.Get("labels").(*schema.Set) res := make([]admin20240805.ComponentLabel, list.Len()) for i, val := range list.List() { @@ -889,8 +945,8 @@ func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240805.Comp return res, nil } -func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]admin20240805.ReplicationSpec20240805 { - var apiObjects []admin20240805.ReplicationSpec20240805 +func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]admin.ReplicationSpec20240805 { + var apiObjects []admin.ReplicationSpec20240805 for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -911,6 +967,28 @@ func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]ad return &apiObjects } +func expandAdvancedReplicationSpecs20240805(tfList []any, rootDiskSizeGB *float64) *[]admin20240805.ReplicationSpec20240805 { + var apiObjects []admin20240805.ReplicationSpec20240805 + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok || tfMap == nil { + continue + } + apiObject := expandAdvancedReplicationSpec20240805(tfMap, rootDiskSizeGB) + apiObjects = append(apiObjects, *apiObject) + + // handles adding additional replication spec objects if legacy num_shards attribute is being used and greater than 1 + numShards := tfMap["num_shards"].(int) + for range numShards - 1 { + apiObjects = append(apiObjects, *apiObject) + } + } + if apiObjects == nil { + return nil + } + return &apiObjects +} + func expandAdvancedReplicationSpecsOldSDK(tfList []any) 
*[]admin20240530.ReplicationSpec { var apiObjects []admin20240530.ReplicationSpec for _, tfMapRaw := range tfList { @@ -927,8 +1005,8 @@ func expandAdvancedReplicationSpecsOldSDK(tfList []any) *[]admin20240530.Replica return &apiObjects } -func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64) *admin20240805.ReplicationSpec20240805 { - apiObject := &admin20240805.ReplicationSpec20240805{ +func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64) *admin.ReplicationSpec20240805 { + apiObject := &admin.ReplicationSpec20240805{ ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), RegionConfigs: expandRegionConfigs(tfMap["region_configs"].([]any), rootDiskSizeGB), } @@ -938,6 +1016,17 @@ func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64 return apiObject } +func expandAdvancedReplicationSpec20240805(tfMap map[string]any, rootDiskSizeGB *float64) *admin20240805.ReplicationSpec20240805 { + apiObject := &admin20240805.ReplicationSpec20240805{ + ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), + RegionConfigs: expandRegionConfigs20240805(tfMap["region_configs"].([]any), rootDiskSizeGB), + } + if tfMap["external_id"].(string) != "" { + apiObject.Id = conversion.StringPtr(tfMap["external_id"].(string)) + } + return apiObject +} + func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20240530.ReplicationSpec { apiObject := &admin20240530.ReplicationSpec{ NumShards: conversion.Pointer(tfMap["num_shards"].(int)), @@ -950,8 +1039,8 @@ func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20240530.Re return apiObject } -func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin20240805.CloudRegionConfig20240805 { - var apiObjects []admin20240805.CloudRegionConfig20240805 +func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin.CloudRegionConfig20240805 { + var apiObjects []admin.CloudRegionConfig20240805 for _, tfMapRaw 
:= range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -966,9 +1055,25 @@ func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin20240805 return &apiObjects } -func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin20240805.CloudRegionConfig20240805 { +func expandRegionConfigs20240805(tfList []any, rootDiskSizeGB *float64) *[]admin20240805.CloudRegionConfig20240805 { + var apiObjects []admin20240805.CloudRegionConfig20240805 + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok || tfMap == nil { + continue + } + apiObject := expandRegionConfig20240805(tfMap, rootDiskSizeGB) + apiObjects = append(apiObjects, *apiObject) + } + if apiObjects == nil { + return nil + } + return &apiObjects +} + +func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.CloudRegionConfig20240805 { providerName := tfMap["provider_name"].(string) - apiObject := &admin20240805.CloudRegionConfig20240805{ + apiObject := &admin.CloudRegionConfig20240805{ Priority: conversion.Pointer(cast.ToInt(tfMap["priority"])), ProviderName: conversion.StringPtr(providerName), RegionName: conversion.StringPtr(tfMap["region_name"].(string)), @@ -995,7 +1100,68 @@ func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin202 return apiObject } -func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *float64) *admin20240805.DedicatedHardwareSpec20240805 { +func expandRegionConfig20240805(tfMap map[string]any, rootDiskSizeGB *float64) *admin20240805.CloudRegionConfig20240805 { + providerName := tfMap["provider_name"].(string) + apiObject := &admin20240805.CloudRegionConfig20240805{ + Priority: conversion.Pointer(cast.ToInt(tfMap["priority"])), + ProviderName: conversion.StringPtr(providerName), + RegionName: conversion.StringPtr(tfMap["region_name"].(string)), + } + + if v, ok := tfMap["analytics_specs"]; ok && len(v.([]any)) > 0 { + 
apiObject.AnalyticsSpecs = expandRegionConfigSpec20240805(v.([]any), providerName, rootDiskSizeGB) + } + if v, ok := tfMap["electable_specs"]; ok && len(v.([]any)) > 0 { + apiObject.ElectableSpecs = dedicatedHwSpecToHwSpec20240805(expandRegionConfigSpec20240805(v.([]any), providerName, rootDiskSizeGB)) + } + if v, ok := tfMap["read_only_specs"]; ok && len(v.([]any)) > 0 { + apiObject.ReadOnlySpecs = expandRegionConfigSpec20240805(v.([]any), providerName, rootDiskSizeGB) + } + if v, ok := tfMap["auto_scaling"]; ok && len(v.([]any)) > 0 { + apiObject.AutoScaling = expandRegionConfigAutoScaling20240805(v.([]any)) + } + if v, ok := tfMap["analytics_auto_scaling"]; ok && len(v.([]any)) > 0 { + apiObject.AnalyticsAutoScaling = expandRegionConfigAutoScaling20240805(v.([]any)) + } + if v, ok := tfMap["backing_provider_name"]; ok { + apiObject.BackingProviderName = conversion.StringPtr(v.(string)) + } + return apiObject +} + +func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *float64) *admin.DedicatedHardwareSpec20240805 { + tfMap, _ := tfList[0].(map[string]any) + apiObject := new(admin.DedicatedHardwareSpec20240805) + if providerName == constant.AWS || providerName == constant.AZURE { + if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { + apiObject.DiskIOPS = conversion.Pointer(v.(int)) + } + } + if providerName == constant.AWS { + if v, ok := tfMap["ebs_volume_type"]; ok { + apiObject.EbsVolumeType = conversion.StringPtr(v.(string)) + } + } + if v, ok := tfMap["instance_size"]; ok { + apiObject.InstanceSize = conversion.StringPtr(v.(string)) + } + if v, ok := tfMap["node_count"]; ok { + apiObject.NodeCount = conversion.Pointer(v.(int)) + } + + if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 { + apiObject.DiskSizeGB = conversion.Pointer(v.(float64)) + } + + // value defined in root is set if it is defined in the create, or value has changed in the update. 
+ if rootDiskSizeGB != nil { + apiObject.DiskSizeGB = rootDiskSizeGB + } + + return apiObject +} + +func expandRegionConfigSpec20240805(tfList []any, providerName string, rootDiskSizeGB *float64) *admin20240805.DedicatedHardwareSpec20240805 { tfMap, _ := tfList[0].(map[string]any) apiObject := new(admin20240805.DedicatedHardwareSpec20240805) if providerName == constant.AWS || providerName == constant.AZURE { @@ -1027,7 +1193,38 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f return apiObject } -func expandRegionConfigAutoScaling(tfList []any) *admin20240805.AdvancedAutoScalingSettings { +func expandRegionConfigAutoScaling(tfList []any) *admin.AdvancedAutoScalingSettings { + tfMap, _ := tfList[0].(map[string]any) + settings := admin.AdvancedAutoScalingSettings{ + DiskGB: new(admin.DiskGBAutoScaling), + Compute: new(admin.AdvancedComputeAutoScaling), + } + + if v, ok := tfMap["disk_gb_enabled"]; ok { + settings.DiskGB.Enabled = conversion.Pointer(v.(bool)) + } + if v, ok := tfMap["compute_enabled"]; ok { + settings.Compute.Enabled = conversion.Pointer(v.(bool)) + } + if v, ok := tfMap["compute_scale_down_enabled"]; ok { + settings.Compute.ScaleDownEnabled = conversion.Pointer(v.(bool)) + } + if v, ok := tfMap["compute_min_instance_size"]; ok { + value := settings.Compute.ScaleDownEnabled + if *value { + settings.Compute.MinInstanceSize = conversion.StringPtr(v.(string)) + } + } + if v, ok := tfMap["compute_max_instance_size"]; ok { + value := settings.Compute.Enabled + if *value { + settings.Compute.MaxInstanceSize = conversion.StringPtr(v.(string)) + } + } + return &settings +} + +func expandRegionConfigAutoScaling20240805(tfList []any) *admin20240805.AdvancedAutoScalingSettings { tfMap, _ := tfList[0].(map[string]any) settings := admin20240805.AdvancedAutoScalingSettings{ DiskGB: new(admin20240805.DiskGBAutoScaling), diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go 
b/internal/service/advancedcluster/model_advanced_cluster_test.go index ecef34a418..7dd517c469 100644 --- a/internal/service/advancedcluster/model_advanced_cluster_test.go +++ b/internal/service/advancedcluster/model_advanced_cluster_test.go @@ -8,7 +8,6 @@ import ( "testing" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" "go.mongodb.org/atlas-sdk/v20241023002/admin" "go.mongodb.org/atlas-sdk/v20241023002/mockadmin" @@ -451,13 +450,13 @@ func TestCheckRegionConfigsPriorityOrder(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - configs := make([]admin20240805.CloudRegionConfig20240805, len(tc.priorities)) + configs := make([]admin.CloudRegionConfig20240805, len(tc.priorities)) configsOld := make([]admin20240530.CloudRegionConfig, len(tc.priorities)) for i, priority := range tc.priorities { configs[i].Priority = conversion.IntPtr(priority) configsOld[i].Priority = conversion.IntPtr(priority) } - err := advancedcluster.CheckRegionConfigsPriorityOrder([]admin20240805.ReplicationSpec20240805{{RegionConfigs: &configs}}) + err := advancedcluster.CheckRegionConfigsPriorityOrder([]admin.ReplicationSpec20240805{{RegionConfigs: &configs}}) assert.Equal(t, tc.errorExpected, err != nil) err = advancedcluster.CheckRegionConfigsPriorityOrderOld([]admin20240530.ReplicationSpec{{RegionConfigs: &configsOld}}) assert.Equal(t, tc.errorExpected, err != nil) diff --git a/internal/service/advancedcluster/model_sdk_version_conversion.go b/internal/service/advancedcluster/model_sdk_version_conversion.go index 8226efb633..4d2c76191e 100644 --- a/internal/service/advancedcluster/model_sdk_version_conversion.go +++ b/internal/service/advancedcluster/model_sdk_version_conversion.go @@ -48,7 +48,7 @@ func convertTagsToLatest(tags []admin20240530.ResourceTag) []admin.ResourceTag { return results } -func convertBiConnectToOldSDK(biconnector *admin20240805.BiConnector) 
*admin20240530.BiConnector { +func convertBiConnectToOldSDK(biconnector *admin.BiConnector) *admin20240530.BiConnector { if biconnector == nil { return nil } @@ -126,7 +126,7 @@ func convertLabelsToLatest(labels *[]admin20240530.ComponentLabel) *[]admin.Comp return &results } -func convertLabelSliceToOldSDK(slice []admin20240805.ComponentLabel, err diag.Diagnostics) ([]admin20240530.ComponentLabel, diag.Diagnostics) { +func convertLabelSliceToOldSDK(slice []admin.ComponentLabel, err diag.Diagnostics) ([]admin20240530.ComponentLabel, diag.Diagnostics) { if err != nil { return nil, err } @@ -141,7 +141,7 @@ func convertLabelSliceToOldSDK(slice []admin20240805.ComponentLabel, err diag.Di return results, nil } -func convertRegionConfigSliceToOldSDK(slice *[]admin20240805.CloudRegionConfig20240805) *[]admin20240530.CloudRegionConfig { +func convertRegionConfigSliceToOldSDK(slice *[]admin.CloudRegionConfig20240805) *[]admin20240530.CloudRegionConfig { if slice == nil { return nil } @@ -164,7 +164,7 @@ func convertRegionConfigSliceToOldSDK(slice *[]admin20240805.CloudRegionConfig20 return &results } -func convertHardwareSpecToOldSDK(hwspec *admin20240805.HardwareSpec20240805) *admin20240530.HardwareSpec { +func convertHardwareSpecToOldSDK(hwspec *admin.HardwareSpec20240805) *admin20240530.HardwareSpec { if hwspec == nil { return nil } @@ -176,7 +176,7 @@ func convertHardwareSpecToOldSDK(hwspec *admin20240805.HardwareSpec20240805) *ad } } -func convertAdvancedAutoScalingSettingsToOldSDK(settings *admin20240805.AdvancedAutoScalingSettings) *admin20240530.AdvancedAutoScalingSettings { +func convertAdvancedAutoScalingSettingsToOldSDK(settings *admin.AdvancedAutoScalingSettings) *admin20240530.AdvancedAutoScalingSettings { if settings == nil { return nil } @@ -186,7 +186,7 @@ func convertAdvancedAutoScalingSettingsToOldSDK(settings *admin20240805.Advanced } } -func convertAdvancedComputeAutoScalingToOldSDK(settings *admin20240805.AdvancedComputeAutoScaling) 
*admin20240530.AdvancedComputeAutoScaling { +func convertAdvancedComputeAutoScalingToOldSDK(settings *admin.AdvancedComputeAutoScaling) *admin20240530.AdvancedComputeAutoScaling { if settings == nil { return nil } @@ -198,7 +198,7 @@ func convertAdvancedComputeAutoScalingToOldSDK(settings *admin20240805.AdvancedC } } -func convertDiskGBAutoScalingToOldSDK(settings *admin20240805.DiskGBAutoScaling) *admin20240530.DiskGBAutoScaling { +func convertDiskGBAutoScalingToOldSDK(settings *admin.DiskGBAutoScaling) *admin20240530.DiskGBAutoScaling { if settings == nil { return nil } @@ -207,7 +207,7 @@ func convertDiskGBAutoScalingToOldSDK(settings *admin20240805.DiskGBAutoScaling) } } -func convertDedicatedHardwareSpecToOldSDK(spec *admin20240805.DedicatedHardwareSpec20240805) *admin20240530.DedicatedHardwareSpec { +func convertDedicatedHardwareSpecToOldSDK(spec *admin.DedicatedHardwareSpec20240805) *admin20240530.DedicatedHardwareSpec { if spec == nil { return nil } diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 5e12803397..0a3b4b3827 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -419,14 +419,14 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
params := &admin20240805.ClusterDescription20240805{ Name: conversion.StringPtr(cast.ToString(d.Get("name"))), ClusterType: conversion.StringPtr(cast.ToString(d.Get("cluster_type"))), - ReplicationSpecs: expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any), rootDiskSizeGB), + ReplicationSpecs: expandAdvancedReplicationSpecs20240805(d.Get("replication_specs").([]any), rootDiskSizeGB), } if v, ok := d.GetOk("backup_enabled"); ok { params.BackupEnabled = conversion.Pointer(v.(bool)) } if _, ok := d.GetOk("bi_connector_config"); ok { - params.BiConnector = expandBiConnectorConfig(d) + params.BiConnector = expandBiConnectorConfig20240805(d) } if v, ok := d.GetOk("encryption_at_rest_provider"); ok { @@ -434,7 +434,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if _, ok := d.GetOk("labels"); ok { - labels, err := expandLabelSliceFromSetSchema(d) + labels, err := expandLabelSliceFromSetSchema20240805(d) if err != nil { return err } @@ -479,7 +479,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } } - if err := CheckRegionConfigsPriorityOrder(params.GetReplicationSpecs()); err != nil { + if err := CheckRegionConfigsPriorityOrder20240805(params.GetReplicationSpecs()); err != nil { return diag.FromErr(err) } // cannot call latest API (2024-10-23 or newer) as it can enable ISS autoscaling @@ -821,7 +821,6 @@ func resourceUpgrade(ctx context.Context, upgradeRequest *admin.LegacyAtlasTenan func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 - connV220240805 := meta.(*config.MongoDBClient).AtlasV220240805 connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -850,7 +849,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
waitOnUpdate = true } if d.HasChange("replica_set_scaling_strategy") || d.HasChange("redact_client_log_data") || d.HasChange("config_server_management_mode") { - request := new(admin20240805.ClusterDescription20240805) + request := new(admin.ClusterDescription20240805) if d.HasChange("replica_set_scaling_strategy") { request.ReplicaSetScalingStrategy = conversion.Pointer(d.Get("replica_set_scaling_strategy").(string)) } @@ -861,7 +860,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. request.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) } // can call latest API (2024-10-23 or newer) as autoscaling property is not specified, using older version just for caution until iss autoscaling epic is done - if _, _, err := connV220240805.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil { + if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } waitOnUpdate = true @@ -882,7 +881,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(err) } // cannot call latest API (2024-10-23 or newer) as it can enable ISS autoscaling - if _, _, err := connV220240805.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { + if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { @@ -916,11 +915,11 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} if d.Get("paused").(bool) { - clusterRequest := &admin20240805.ClusterDescription20240805{ + clusterRequest := &admin.ClusterDescription20240805{ Paused: conversion.Pointer(true), } // can call latest API (2024-10-23 or newer) as autoscaling property is not specified, using older version just for caution until iss autoscaling epic is done - if _, _, err := connV220240805.ClustersApi.UpdateCluster(ctx, projectID, clusterName, clusterRequest).Execute(); err != nil { + if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, clusterRequest).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { @@ -931,8 +930,8 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. return resourceRead(ctx, d, meta) } -func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clusterName string, connV2 *admin.APIClient) (*admin20240805.ClusterDescription20240805, diag.Diagnostics) { - cluster := new(admin20240805.ClusterDescription20240805) +func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clusterName string, connV2 *admin.APIClient) (*admin.ClusterDescription20240805, diag.Diagnostics) { + cluster := new(admin.ClusterDescription20240805) if d.HasChange("replication_specs") || d.HasChange("disk_size_gb") { var updatedDiskSizeGB *float64 @@ -979,7 +978,7 @@ func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clust } if d.HasChange("tags") { - cluster.Tags = conversion.ExpandTagsFromSetSchemaV220240805(d) + cluster.Tags = conversion.ExpandTagsFromSetSchema(d) } if d.HasChange("mongo_db_major_version") { diff --git a/internal/service/advancedcluster/resource_update_logic.go b/internal/service/advancedcluster/resource_update_logic.go index 1e643ab581..8caa468583 100644 --- a/internal/service/advancedcluster/resource_update_logic.go +++ 
b/internal/service/advancedcluster/resource_update_logic.go @@ -6,11 +6,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" "go.mongodb.org/atlas-sdk/v20241023002/admin" ) -func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin20240805.ReplicationSpec20240805) bool { +func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin.ReplicationSpec20240805) bool { if replicationSpecs == nil || len(*replicationSpecs) == 0 { return false } @@ -22,7 +21,7 @@ func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin20240805.Replicat return true } -func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName string, connV2ClusterAPI admin.ClustersApi, replicationSpecs *[]admin20240805.ReplicationSpec20240805) (*[]admin20240805.ReplicationSpec20240805, diag.Diagnostics) { +func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName string, connV2ClusterAPI admin.ClustersApi, replicationSpecs *[]admin.ReplicationSpec20240805) (*[]admin.ReplicationSpec20240805, diag.Diagnostics) { if replicationSpecs == nil || len(*replicationSpecs) == 0 { return replicationSpecs, nil } @@ -36,7 +35,7 @@ func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName str return &result, nil } -func AddIDsToReplicationSpecs(replicationSpecs []admin20240805.ReplicationSpec20240805, zoneToReplicationSpecsIDs map[string][]string) []admin20240805.ReplicationSpec20240805 { +func AddIDsToReplicationSpecs(replicationSpecs []admin.ReplicationSpec20240805, zoneToReplicationSpecsIDs map[string][]string) []admin.ReplicationSpec20240805 { for zoneName, availableIDs := range zoneToReplicationSpecsIDs { var indexOfIDToUse = 0 for i := range replicationSpecs { @@ -65,12 +64,12 @@ func groupIDsByZone(specs []admin.ReplicationSpec20240805) map[string][]string { // - Existing replication specs 
can have the autoscaling values present in the state with default values even if not defined in the config (case when cluster is imported) // - API expects autoScaling and analyticsAutoScaling aligned cross all region configs in the PATCH request // This function is needed to avoid errors if a new replication spec is added, ensuring the PATCH request will have the auto scaling aligned with other replication specs when not present in config. -func SyncAutoScalingConfigs(replicationSpecs *[]admin20240805.ReplicationSpec20240805) { +func SyncAutoScalingConfigs(replicationSpecs *[]admin.ReplicationSpec20240805) { if replicationSpecs == nil || len(*replicationSpecs) == 0 { return } - var defaultAnalyticsAutoScaling, defaultAutoScaling *admin20240805.AdvancedAutoScalingSettings + var defaultAnalyticsAutoScaling, defaultAutoScaling *admin.AdvancedAutoScalingSettings for _, spec := range *replicationSpecs { for i := range *spec.RegionConfigs { @@ -86,7 +85,7 @@ func SyncAutoScalingConfigs(replicationSpecs *[]admin20240805.ReplicationSpec202 applyDefaultAutoScaling(replicationSpecs, defaultAutoScaling, defaultAnalyticsAutoScaling) } -func applyDefaultAutoScaling(replicationSpecs *[]admin20240805.ReplicationSpec20240805, defaultAutoScaling, defaultAnalyticsAutoScaling *admin20240805.AdvancedAutoScalingSettings) { +func applyDefaultAutoScaling(replicationSpecs *[]admin.ReplicationSpec20240805, defaultAutoScaling, defaultAnalyticsAutoScaling *admin.AdvancedAutoScalingSettings) { for _, spec := range *replicationSpecs { for i := range *spec.RegionConfigs { regionConfig := &(*spec.RegionConfigs)[i] diff --git a/internal/service/advancedcluster/resource_update_logic_test.go b/internal/service/advancedcluster/resource_update_logic_test.go index 1528450bbc..a53cbce6fd 100644 --- a/internal/service/advancedcluster/resource_update_logic_test.go +++ b/internal/service/advancedcluster/resource_update_logic_test.go @@ -5,18 +5,17 @@ import ( 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/stretchr/testify/assert" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" "go.mongodb.org/atlas-sdk/v20241023002/admin" ) func TestAddIDsToReplicationSpecs(t *testing.T) { testCases := map[string]struct { - ReplicationSpecs []admin20240805.ReplicationSpec20240805 + ReplicationSpecs []admin.ReplicationSpec20240805 ZoneToReplicationSpecsIDs map[string][]string - ExpectedReplicationSpecs []admin20240805.ReplicationSpec20240805 + ExpectedReplicationSpecs []admin.ReplicationSpec20240805 }{ "two zones with same amount of available ids and replication specs to populate": { - ReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), }, @@ -34,7 +33,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { "Zone 1": {"zone1-id1", "zone1-id2"}, "Zone 2": {"zone2-id1", "zone2-id2"}, }, - ExpectedReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), Id: admin.PtrString("zone1-id1"), @@ -54,7 +53,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { }, }, "less available ids than replication specs to populate": { - ReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), }, @@ -72,7 +71,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { "Zone 1": {"zone1-id1"}, "Zone 2": {"zone2-id1"}, }, - ExpectedReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), Id: admin.PtrString("zone1-id1"), @@ -92,7 +91,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { }, }, "more available ids than replication specs to populate": { - ReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + 
ReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), }, @@ -104,7 +103,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { "Zone 1": {"zone1-id1", "zone1-id2"}, "Zone 2": {"zone2-id1", "zone2-id2"}, }, - ExpectedReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), Id: admin.PtrString("zone1-id1"), @@ -127,23 +126,23 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { func TestSyncAutoScalingConfigs(t *testing.T) { testCases := map[string]struct { - ReplicationSpecs []admin20240805.ReplicationSpec20240805 - ExpectedReplicationSpecs []admin20240805.ReplicationSpec20240805 + ReplicationSpecs []admin.ReplicationSpec20240805 + ExpectedReplicationSpecs []admin.ReplicationSpec20240805 }{ "apply same autoscaling options for new replication spec which does not have autoscaling defined": { - ReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, @@ -153,7 +152,7 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: 
&[]admin.CloudRegionConfig20240805{ { AutoScaling: nil, AnalyticsAutoScaling: nil, @@ -161,19 +160,19 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, }, }, - ExpectedReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, @@ -183,16 +182,16 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, @@ -204,19 +203,19 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, // for this case the API will respond with an error and guide the user to align 
autoscaling options cross all nodes "when different autoscaling options are defined values will not be changed": { - ReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(true), ScaleDownEnabled: admin.PtrBool(true), }, @@ -226,15 +225,15 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(true), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), }, }, @@ -242,19 +241,19 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, }, }, - ExpectedReplicationSpecs: []admin20240805.ReplicationSpec20240805{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + 
RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), ScaleDownEnabled: admin.PtrBool(false), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(true), ScaleDownEnabled: admin.PtrBool(true), }, @@ -264,15 +263,15 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(true), }, }, - AnalyticsAutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ Enabled: admin.PtrBool(false), }, }, From 4c3ba0e27ecb90144fdf6aad65dcd5c0b3699360 Mon Sep 17 00:00:00 2001 From: Melanija Cvetic Date: Thu, 21 Nov 2024 11:25:41 +0000 Subject: [PATCH 2/5] removed temporary 20240805 functions and adjust create operation --- internal/common/conversion/flatten_expand.go | 14 -- .../advancedcluster/model_advanced_cluster.go | 198 ------------------ .../model_sdk_version_conversion.go | 3 +- .../resource_advanced_cluster.go | 18 +- 4 files changed, 10 insertions(+), 223 deletions(-) diff --git a/internal/common/conversion/flatten_expand.go b/internal/common/conversion/flatten_expand.go index a9ecb6b727..a28e5ec6fa 100644 --- 
a/internal/common/conversion/flatten_expand.go +++ b/internal/common/conversion/flatten_expand.go @@ -3,7 +3,6 @@ package conversion import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" "go.mongodb.org/atlas-sdk/v20241023002/admin" ) @@ -29,19 +28,6 @@ func FlattenTags(tags []admin.ResourceTag) []map[string]string { return ret } -func ExpandTagsFromSetSchemaV220240805(d *schema.ResourceData) *[]admin20240805.ResourceTag { - list := d.Get("tags").(*schema.Set) - ret := make([]admin20240805.ResourceTag, list.Len()) - for i, item := range list.List() { - tag := item.(map[string]any) - ret[i] = admin20240805.ResourceTag{ - Key: tag["key"].(string), - Value: tag["value"].(string), - } - } - return &ret -} - func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { list := d.Get("tags").(*schema.Set) ret := make([]admin.ResourceTag, list.Len()) diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 07bf1e770a..10b95389b3 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -12,7 +12,6 @@ import ( "strings" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" "go.mongodb.org/atlas-sdk/v20241023002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -413,19 +412,6 @@ func CheckRegionConfigsPriorityOrder(regionConfigs []admin.ReplicationSpec202408 return nil } -// CheckRegionConfigsPriorityOrder will be deleted in CLOUDP-275825 -func CheckRegionConfigsPriorityOrder20240805(regionConfigs []admin20240805.ReplicationSpec20240805) error { - for _, spec := range regionConfigs { - configs := spec.GetRegionConfigs() - for i := 0; i < len(configs)-1; i++ { - if configs[i].GetPriority() < configs[i+1].GetPriority() { - return 
errors.New("priority values in region_configs must be in descending order") - } - } - } - return nil -} - // CheckRegionConfigsPriorityOrderOld will be deleted in CLOUDP-275825 func CheckRegionConfigsPriorityOrderOld(regionConfigs []admin20240530.ReplicationSpec) error { for _, spec := range regionConfigs { @@ -499,19 +485,6 @@ func expandBiConnectorConfig(d *schema.ResourceData) *admin.BiConnector { return nil } -func expandBiConnectorConfig20240805(d *schema.ResourceData) *admin20240805.BiConnector { - if v, ok := d.GetOk("bi_connector_config"); ok { - if biConn := v.([]any); len(biConn) > 0 { - biConnMap := biConn[0].(map[string]any) - return &admin20240805.BiConnector{ - Enabled: conversion.Pointer(cast.ToBool(biConnMap["enabled"])), - ReadPreference: conversion.StringPtr(cast.ToString(biConnMap["read_preference"])), - } - } - } - return nil -} - func flattenProcessArgs(p20240530 *admin20240530.ClusterDescriptionProcessArgs, p *admin.ClusterDescriptionProcessArgs20240805) []map[string]any { if p20240530 == nil { return nil @@ -734,19 +707,6 @@ func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec20240805) *ad } } -func dedicatedHwSpecToHwSpec20240805(apiObject *admin20240805.DedicatedHardwareSpec20240805) *admin20240805.HardwareSpec20240805 { - if apiObject == nil { - return nil - } - return &admin20240805.HardwareSpec20240805{ - DiskSizeGB: apiObject.DiskSizeGB, - NodeCount: apiObject.NodeCount, - DiskIOPS: apiObject.DiskIOPS, - EbsVolumeType: apiObject.EbsVolumeType, - InstanceSize: apiObject.InstanceSize, - } -} - func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHardwareSpec20240805, providerName string, tfMapObjects []any) []map[string]any { if apiObject == nil { return nil @@ -928,23 +888,6 @@ func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin.ComponentLab return res, nil } -func expandLabelSliceFromSetSchema20240805(d *schema.ResourceData) ([]admin20240805.ComponentLabel, diag.Diagnostics) { - 
list := d.Get("labels").(*schema.Set) - res := make([]admin20240805.ComponentLabel, list.Len()) - for i, val := range list.List() { - v := val.(map[string]any) - key := v["key"].(string) - if key == ignoreLabel { - return nil, diag.FromErr(fmt.Errorf("you should not set `Infrastructure Tool` label, it is used for internal purposes")) - } - res[i] = admin20240805.ComponentLabel{ - Key: conversion.StringPtr(key), - Value: conversion.StringPtr(v["value"].(string)), - } - } - return res, nil -} - func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]admin.ReplicationSpec20240805 { var apiObjects []admin.ReplicationSpec20240805 for _, tfMapRaw := range tfList { @@ -967,28 +910,6 @@ func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]ad return &apiObjects } -func expandAdvancedReplicationSpecs20240805(tfList []any, rootDiskSizeGB *float64) *[]admin20240805.ReplicationSpec20240805 { - var apiObjects []admin20240805.ReplicationSpec20240805 - for _, tfMapRaw := range tfList { - tfMap, ok := tfMapRaw.(map[string]any) - if !ok || tfMap == nil { - continue - } - apiObject := expandAdvancedReplicationSpec20240805(tfMap, rootDiskSizeGB) - apiObjects = append(apiObjects, *apiObject) - - // handles adding additional replication spec objects if legacy num_shards attribute is being used and greater than 1 - numShards := tfMap["num_shards"].(int) - for range numShards - 1 { - apiObjects = append(apiObjects, *apiObject) - } - } - if apiObjects == nil { - return nil - } - return &apiObjects -} - func expandAdvancedReplicationSpecsOldSDK(tfList []any) *[]admin20240530.ReplicationSpec { var apiObjects []admin20240530.ReplicationSpec for _, tfMapRaw := range tfList { @@ -1016,17 +937,6 @@ func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64 return apiObject } -func expandAdvancedReplicationSpec20240805(tfMap map[string]any, rootDiskSizeGB *float64) *admin20240805.ReplicationSpec20240805 { - apiObject := 
&admin20240805.ReplicationSpec20240805{ - ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), - RegionConfigs: expandRegionConfigs20240805(tfMap["region_configs"].([]any), rootDiskSizeGB), - } - if tfMap["external_id"].(string) != "" { - apiObject.Id = conversion.StringPtr(tfMap["external_id"].(string)) - } - return apiObject -} - func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20240530.ReplicationSpec { apiObject := &admin20240530.ReplicationSpec{ NumShards: conversion.Pointer(tfMap["num_shards"].(int)), @@ -1055,22 +965,6 @@ func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin.CloudRe return &apiObjects } -func expandRegionConfigs20240805(tfList []any, rootDiskSizeGB *float64) *[]admin20240805.CloudRegionConfig20240805 { - var apiObjects []admin20240805.CloudRegionConfig20240805 - for _, tfMapRaw := range tfList { - tfMap, ok := tfMapRaw.(map[string]any) - if !ok || tfMap == nil { - continue - } - apiObject := expandRegionConfig20240805(tfMap, rootDiskSizeGB) - apiObjects = append(apiObjects, *apiObject) - } - if apiObjects == nil { - return nil - } - return &apiObjects -} - func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.CloudRegionConfig20240805 { providerName := tfMap["provider_name"].(string) apiObject := &admin.CloudRegionConfig20240805{ @@ -1100,35 +994,6 @@ func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.Cl return apiObject } -func expandRegionConfig20240805(tfMap map[string]any, rootDiskSizeGB *float64) *admin20240805.CloudRegionConfig20240805 { - providerName := tfMap["provider_name"].(string) - apiObject := &admin20240805.CloudRegionConfig20240805{ - Priority: conversion.Pointer(cast.ToInt(tfMap["priority"])), - ProviderName: conversion.StringPtr(providerName), - RegionName: conversion.StringPtr(tfMap["region_name"].(string)), - } - - if v, ok := tfMap["analytics_specs"]; ok && len(v.([]any)) > 0 { - apiObject.AnalyticsSpecs = 
expandRegionConfigSpec20240805(v.([]any), providerName, rootDiskSizeGB) - } - if v, ok := tfMap["electable_specs"]; ok && len(v.([]any)) > 0 { - apiObject.ElectableSpecs = dedicatedHwSpecToHwSpec20240805(expandRegionConfigSpec20240805(v.([]any), providerName, rootDiskSizeGB)) - } - if v, ok := tfMap["read_only_specs"]; ok && len(v.([]any)) > 0 { - apiObject.ReadOnlySpecs = expandRegionConfigSpec20240805(v.([]any), providerName, rootDiskSizeGB) - } - if v, ok := tfMap["auto_scaling"]; ok && len(v.([]any)) > 0 { - apiObject.AutoScaling = expandRegionConfigAutoScaling20240805(v.([]any)) - } - if v, ok := tfMap["analytics_auto_scaling"]; ok && len(v.([]any)) > 0 { - apiObject.AnalyticsAutoScaling = expandRegionConfigAutoScaling20240805(v.([]any)) - } - if v, ok := tfMap["backing_provider_name"]; ok { - apiObject.BackingProviderName = conversion.StringPtr(v.(string)) - } - return apiObject -} - func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *float64) *admin.DedicatedHardwareSpec20240805 { tfMap, _ := tfList[0].(map[string]any) apiObject := new(admin.DedicatedHardwareSpec20240805) @@ -1161,38 +1026,6 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f return apiObject } -func expandRegionConfigSpec20240805(tfList []any, providerName string, rootDiskSizeGB *float64) *admin20240805.DedicatedHardwareSpec20240805 { - tfMap, _ := tfList[0].(map[string]any) - apiObject := new(admin20240805.DedicatedHardwareSpec20240805) - if providerName == constant.AWS || providerName == constant.AZURE { - if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { - apiObject.DiskIOPS = conversion.Pointer(v.(int)) - } - } - if providerName == constant.AWS { - if v, ok := tfMap["ebs_volume_type"]; ok { - apiObject.EbsVolumeType = conversion.StringPtr(v.(string)) - } - } - if v, ok := tfMap["instance_size"]; ok { - apiObject.InstanceSize = conversion.StringPtr(v.(string)) - } - if v, ok := tfMap["node_count"]; ok { - apiObject.NodeCount = 
conversion.Pointer(v.(int)) - } - - if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 { - apiObject.DiskSizeGB = conversion.Pointer(v.(float64)) - } - - // value defined in root is set if it is defined in the create, or value has changed in the update. - if rootDiskSizeGB != nil { - apiObject.DiskSizeGB = rootDiskSizeGB - } - - return apiObject -} - func expandRegionConfigAutoScaling(tfList []any) *admin.AdvancedAutoScalingSettings { tfMap, _ := tfList[0].(map[string]any) settings := admin.AdvancedAutoScalingSettings{ @@ -1224,37 +1057,6 @@ func expandRegionConfigAutoScaling(tfList []any) *admin.AdvancedAutoScalingSetti return &settings } -func expandRegionConfigAutoScaling20240805(tfList []any) *admin20240805.AdvancedAutoScalingSettings { - tfMap, _ := tfList[0].(map[string]any) - settings := admin20240805.AdvancedAutoScalingSettings{ - DiskGB: new(admin20240805.DiskGBAutoScaling), - Compute: new(admin20240805.AdvancedComputeAutoScaling), - } - - if v, ok := tfMap["disk_gb_enabled"]; ok { - settings.DiskGB.Enabled = conversion.Pointer(v.(bool)) - } - if v, ok := tfMap["compute_enabled"]; ok { - settings.Compute.Enabled = conversion.Pointer(v.(bool)) - } - if v, ok := tfMap["compute_scale_down_enabled"]; ok { - settings.Compute.ScaleDownEnabled = conversion.Pointer(v.(bool)) - } - if v, ok := tfMap["compute_min_instance_size"]; ok { - value := settings.Compute.ScaleDownEnabled - if *value { - settings.Compute.MinInstanceSize = conversion.StringPtr(v.(string)) - } - } - if v, ok := tfMap["compute_max_instance_size"]; ok { - value := settings.Compute.Enabled - if *value { - settings.Compute.MaxInstanceSize = conversion.StringPtr(v.(string)) - } - } - return &settings -} - func flattenAdvancedReplicationSpecsDS(ctx context.Context, apiRepSpecs []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { if len(apiRepSpecs) == 0 { return nil, nil diff --git 
a/internal/service/advancedcluster/model_sdk_version_conversion.go b/internal/service/advancedcluster/model_sdk_version_conversion.go index 4d2c76191e..292cc71136 100644 --- a/internal/service/advancedcluster/model_sdk_version_conversion.go +++ b/internal/service/advancedcluster/model_sdk_version_conversion.go @@ -2,7 +2,6 @@ package advancedcluster import ( admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" "go.mongodb.org/atlas-sdk/v20241023002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -20,7 +19,7 @@ func convertTagsPtrToLatest(tags *[]admin20240530.ResourceTag) *[]admin.Resource return &result } -func convertTagsPtrToOldSDK(tags *[]admin20240805.ResourceTag) *[]admin20240530.ResourceTag { +func convertTagsPtrToOldSDK(tags *[]admin.ResourceTag) *[]admin20240530.ResourceTag { if tags == nil { return nil } diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 0a3b4b3827..fbcfac6654 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -416,17 +416,17 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
rootDiskSizeGB = conversion.Pointer(v.(float64)) } - params := &admin20240805.ClusterDescription20240805{ + params := &admin.ClusterDescription20240805{ Name: conversion.StringPtr(cast.ToString(d.Get("name"))), ClusterType: conversion.StringPtr(cast.ToString(d.Get("cluster_type"))), - ReplicationSpecs: expandAdvancedReplicationSpecs20240805(d.Get("replication_specs").([]any), rootDiskSizeGB), + ReplicationSpecs: expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any), rootDiskSizeGB), } if v, ok := d.GetOk("backup_enabled"); ok { params.BackupEnabled = conversion.Pointer(v.(bool)) } if _, ok := d.GetOk("bi_connector_config"); ok { - params.BiConnector = expandBiConnectorConfig20240805(d) + params.BiConnector = expandBiConnectorConfig(d) } if v, ok := d.GetOk("encryption_at_rest_provider"); ok { @@ -434,7 +434,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if _, ok := d.GetOk("labels"); ok { - labels, err := expandLabelSliceFromSetSchema20240805(d) + labels, err := expandLabelSliceFromSetSchema(d) if err != nil { return err } @@ -442,7 +442,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if _, ok := d.GetOk("tags"); ok { - params.Tags = conversion.ExpandTagsFromSetSchemaV220240805(d) + params.Tags = conversion.ExpandTagsFromSetSchema(d) } if v, ok := d.GetOk("mongo_db_major_version"); ok { params.MongoDBMajorVersion = conversion.StringPtr(FormatMongoDBMajorVersion(v.(string))) @@ -479,11 +479,11 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} } - if err := CheckRegionConfigsPriorityOrder20240805(params.GetReplicationSpecs()); err != nil { + if err := CheckRegionConfigsPriorityOrder(params.GetReplicationSpecs()); err != nil { return diag.FromErr(err) } - // cannot call latest API (2024-10-23 or newer) as it can enable ISS autoscaling - cluster, _, err := connV220240805.ClustersApi.CreateCluster(ctx, projectID, params).Execute() + + cluster, _, err := connV2.ClustersApi.CreateCluster(ctx, projectID, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) } @@ -1072,7 +1072,7 @@ func updateRequestOldAPI(d *schema.ResourceData, clusterName string) (*admin2024 } if d.HasChange("tags") { - cluster.Tags = convertTagsPtrToOldSDK(conversion.ExpandTagsFromSetSchemaV220240805(d)) + cluster.Tags = convertTagsPtrToOldSDK(conversion.ExpandTagsFromSetSchema(d)) } if d.HasChange("mongo_db_major_version") { From f3f4f69f29a8c4ffdef40a6a374a6bd90ef72417 Mon Sep 17 00:00:00 2001 From: Melanija Cvetic Date: Thu, 21 Nov 2024 11:53:50 +0000 Subject: [PATCH 3/5] added changelog --- .changelog/2814.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/2814.txt diff --git a/.changelog/2814.txt b/.changelog/2814.txt new file mode 100644 index 0000000000..00932eb6d0 --- /dev/null +++ b/.changelog/2814.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/mongodbatlas_advanced_cluster: Adjusts update operation to call to latest SDK version to enable ISS +``` From 135ec7ae8522979e46253acc3aadb776e6954dec Mon Sep 17 00:00:00 2001 From: Melanija Cvetic Date: Thu, 21 Nov 2024 13:23:51 +0000 Subject: [PATCH 4/5] adjusted comments --- internal/service/advancedcluster/resource_advanced_cluster.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index fbcfac6654..f2cda4256c 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ 
b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -859,7 +859,6 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if d.HasChange("config_server_management_mode") { request.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) } - // can call latest API (2024-10-23 or newer) as autoscaling property is not specified, using older version just for caution until iss autoscaling epic is done if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } @@ -880,7 +879,6 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if err := CheckRegionConfigsPriorityOrder(req.GetReplicationSpecs()); err != nil { return diag.FromErr(err) } - // cannot call latest API (2024-10-23 or newer) as it can enable ISS autoscaling if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } @@ -918,7 +916,6 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
clusterRequest := &admin.ClusterDescription20240805{ Paused: conversion.Pointer(true), } - // can call latest API (2024-10-23 or newer) as autoscaling property is not specified, using older version just for caution until iss autoscaling epic is done if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, clusterRequest).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } From 304a2099713089c3d1f37fe003624a2ab9c65f8a Mon Sep 17 00:00:00 2001 From: Melanija Cvetic Date: Fri, 22 Nov 2024 09:23:10 +0000 Subject: [PATCH 5/5] Implements review feedback --- .changelog/2814.txt | 2 +- internal/service/advancedcluster/resource_advanced_cluster.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.changelog/2814.txt b/.changelog/2814.txt index 00932eb6d0..71ab0a867c 100644 --- a/.changelog/2814.txt +++ b/.changelog/2814.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/mongodbatlas_advanced_cluster: Adjusts update operation to call to latest SDK version to enable ISS +resource/mongodbatlas_advanced_cluster: Adjusts update operation to support cluster tier auto scaling per shard. ``` diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index f2cda4256c..38b53ab5cc 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -859,6 +859,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
if d.HasChange("config_server_management_mode") { request.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) } + // can call latest API (2024-10-23 or newer) as replication specs with autoscaling properties are not specified if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) }