feat: Updates singular mongodbatlas_advanced_cluster data source to support independent shard scaling & updates relevant flattener methods #2373

Merged: 8 commits, Jun 29, 2024
130 changes: 98 additions & 32 deletions internal/service/advancedcluster/data_source_advanced_cluster.go
@@ -5,8 +5,12 @@ import (
"fmt"
"net/http"

admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin"
"go.mongodb.org/atlas-sdk/v20240530001/admin"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant"
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
@@ -20,6 +24,10 @@ func DataSource() *schema.Resource {
Type: schema.TypeString,
Required: true,
},
"use_replication_spec_per_shard": {
Type: schema.TypeBool,
Optional: true,
},
"advanced_configuration": SchemaAdvancedConfigDS(),
"backup_enabled": {
Type: schema.TypeBool,
@@ -108,6 +116,14 @@ func DataSource() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"zone_id": {
Type: schema.TypeString,
Computed: true,
},
"external_id": {
Type: schema.TypeString,
Computed: true,
},
"num_shards": {
Type: schema.TypeInt,
Computed: true,
@@ -235,50 +251,119 @@ func DataSource() *schema.Resource {

func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
connV2 := meta.(*config.MongoDBClient).AtlasV2
connLatest := meta.(*config.MongoDBClient).AtlasV2Preview

projectID := d.Get("project_id").(string)
clusterName := d.Get("name").(string)
useReplicationSpecPerShard := false
var replicationSpecs []map[string]any
var clusterID string

cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute()
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return nil
if v, ok := d.GetOk("use_replication_spec_per_shard"); ok {
useReplicationSpecPerShard = v.(bool)
}

if !useReplicationSpecPerShard {
clusterDescOld, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute()
if err != nil {
if resp != nil {
if resp.StatusCode == http.StatusNotFound {
return nil
}
if admin20231115.IsErrorCode(err, "ASYMMETRIC_SHARD_UNSUPPORTED") {
return diag.FromErr(fmt.Errorf("please add `use_replication_spec_per_shard = true` to your data source configuration to enable asymmetric shard support. Refer to documentation for more details. %s", err))
}
}
return diag.FromErr(fmt.Errorf(errorRead, clusterName, err))
}

clusterID = clusterDescOld.GetId()

if err := d.Set("disk_size_gb", clusterDescOld.GetDiskSizeGB()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err))
}

replicationSpecs, err = FlattenAdvancedReplicationSpecsOldSDK(ctx, clusterDescOld.GetReplicationSpecs(), clusterDescOld.GetDiskSizeGB(), d.Get("replication_specs").([]any), d, connLatest)
if err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
}

diags := setCommonSchemaFields(d, convertClusterDescToLatestExcludeRepSpecs(clusterDescOld))
if diags.HasError() {
return diags
}
} else {
clusterDescLatest, resp, err := connLatest.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute()
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return nil
}
return diag.FromErr(fmt.Errorf(errorRead, clusterName, err))
}

clusterID = clusterDescLatest.GetId()

replicationSpecs, err = flattenAdvancedReplicationSpecsDS(ctx, clusterDescLatest.GetReplicationSpecs(), d, connLatest)
if err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
}
Comment on lines +306 to +309

Member: Within this path, would we also need to set disk_size_gb using the value defined in the nested hardware spec object?

Collaborator (author): You mean set the root-level disk_size_gb based on the nested value?

Member: Yes, I was thinking of this case.

Collaborator (author): Discussed offline; will keep as is for now.
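For reference, a minimal sketch (not part of this PR) of what deriving the root-level disk_size_gb from the nested hardware spec could look like; the nested getters (GetRegionConfigs, GetElectableSpecs, GetDiskSizeGB) are assumed from the SDK's generated accessors rather than taken from this change:

// Hypothetical helper, not part of this PR: derive a root-level disk_size_gb
// from the nested electable hardware specs. Getter names below are assumptions
// based on the SDK's generated accessors.
func rootDiskSizeGB(cluster *admin.ClusterDescription20240710) float64 {
	for _, spec := range cluster.GetReplicationSpecs() {
		for _, rc := range spec.GetRegionConfigs() {
			// First non-zero value wins; a symmetric cluster reports the same size everywhere.
			if size := rc.GetElectableSpecs().GetDiskSizeGB(); size > 0 {
				return size
			}
		}
	}
	return 0 // asymmetric or unknown; caller would leave the root attribute unset
}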


diags := setCommonSchemaFields(d, clusterDescLatest)
if diags.HasError() {
return diags
}
return diag.FromErr(fmt.Errorf(errorRead, clusterName, err))
}

if err := d.Set("replication_specs", replicationSpecs); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
}

// TODO: update to use connLatest to call below API
processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute()
Collaborator (author): Looks like the return type of the GetClusterAdvancedConfiguration call has changed from *admin.ClusterDescriptionProcessArgs to map[string]interface{} in the latest SDK.

Collaborator (author): Will look into this further as part of https://jira.mongodb.org/browse/CLOUDP-258709; continuing to use the old SDK for this API call for now.
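For context, a rough sketch (not in this PR) of how a flattener might read the latest SDK's untyped response if the call were eventually migrated; the map keys are illustrative assumptions, not confirmed field names:

// Hypothetical sketch, not part of this PR: if the call were moved to the latest
// SDK, whose GetClusterAdvancedConfiguration reportedly returns map[string]interface{},
// the flattener would read individual keys instead of typed getters. Key names
// here are assumptions for illustration only.
func flattenProcessArgsFromMap(args map[string]any) []map[string]any {
	return []map[string]any{{
		"javascript_enabled":           args["javascriptEnabled"],
		"minimum_enabled_tls_protocol": args["minimumEnabledTlsProtocol"],
		"no_table_scan":                args["noTableScan"],
	}}
}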

if err != nil {
return diag.FromErr(fmt.Errorf(ErrorAdvancedConfRead, clusterName, err))
}

if err := d.Set("advanced_configuration", flattenProcessArgs(processArgs)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "advanced_configuration", clusterName, err))
}

d.SetId(clusterID)
return nil
}

func setCommonSchemaFields(d *schema.ResourceData, cluster *admin.ClusterDescription20240710) diag.Diagnostics {
clusterName := *cluster.Name

if err := d.Set("backup_enabled", cluster.GetBackupEnabled()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "backup_enabled", clusterName, err))
}

if err := d.Set("bi_connector_config", flattenBiConnectorConfig(cluster.GetBiConnector())); err != nil {
if err := d.Set("bi_connector_config", flattenBiConnectorConfig(cluster.BiConnector)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "bi_connector_config", clusterName, err))
}

if err := d.Set("cluster_type", cluster.GetClusterType()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "cluster_type", clusterName, err))
}

if err := d.Set("connection_strings", flattenConnectionStrings(cluster.GetConnectionStrings())); err != nil {
if err := d.Set("connection_strings", flattenConnectionStrings(*cluster.ConnectionStrings)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "connection_strings", clusterName, err))
}

if err := d.Set("create_date", conversion.TimePtrToStringPtr(cluster.CreateDate)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "create_date", clusterName, err))
}

if err := d.Set("disk_size_gb", cluster.GetDiskSizeGB()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err))
}

if err := d.Set("encryption_at_rest_provider", cluster.GetEncryptionAtRestProvider()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "encryption_at_rest_provider", clusterName, err))
}

if err := d.Set("labels", flattenLabels(cluster.GetLabels())); err != nil {
if err := d.Set("labels", flattenLabels(*cluster.Labels)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "labels", clusterName, err))
}

if err := d.Set("tags", conversion.FlattenTags(cluster.GetTags())); err != nil {
if err := d.Set("tags", flattenTags(cluster.Tags)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "tags", clusterName, err))
}

@@ -302,15 +387,6 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "pit_enabled", clusterName, err))
}

replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV2)
if err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
}

if err := d.Set("replication_specs", replicationSpecs); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
}

if err := d.Set("root_cert_type", cluster.GetRootCertType()); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "state_name", clusterName, err))
}
@@ -328,15 +404,5 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err))
}

processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute()
if err != nil {
return diag.FromErr(fmt.Errorf(ErrorAdvancedConfRead, clusterName, err))
}

if err := d.Set("advanced_configuration", flattenProcessArgs(processArgs)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "advanced_configuration", clusterName, err))
}

d.SetId(cluster.GetId())
return nil
}
@@ -6,13 +6,16 @@ import (
"log"
"net/http"

admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin"
"go.mongodb.org/atlas-sdk/v20240530001/admin"

"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant"
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config"
"go.mongodb.org/atlas-sdk/v20231115014/admin"
)

func PluralDataSource() *schema.Resource {
@@ -246,6 +249,7 @@ func PluralDataSource() *schema.Resource {

func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
connV2 := meta.(*config.MongoDBClient).AtlasV2
connLatest := meta.(*config.MongoDBClient).AtlasV2Preview
projectID := d.Get("project_id").(string)
d.SetId(id.UniqueId())

@@ -256,36 +260,36 @@ func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any)
}
return diag.FromErr(fmt.Errorf("error reading advanced cluster list for project(%s): %s", projectID, err))
}
if err := d.Set("results", flattenAdvancedClusters(ctx, connV2, list.GetResults(), d)); err != nil {
if err := d.Set("results", flattenAdvancedClusters(ctx, connV2, connLatest, list.GetResults(), d)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "results", d.Id(), err))
}

return nil
}

func flattenAdvancedClusters(ctx context.Context, connV2 *admin.APIClient, clusters []admin.AdvancedClusterDescription, d *schema.ResourceData) []map[string]any {
func flattenAdvancedClusters(ctx context.Context, connV2 *admin20231115.APIClient, connLatest *admin.APIClient, clusters []admin20231115.AdvancedClusterDescription, d *schema.ResourceData) []map[string]any {
results := make([]map[string]any, 0, len(clusters))
for i := range clusters {
cluster := &clusters[i]
processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute()
if err != nil {
log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", cluster.GetId(), err)
}
replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), nil, d, connV2)
replicationSpecs, err := FlattenAdvancedReplicationSpecsOldSDK(ctx, cluster.GetReplicationSpecs(), cluster.GetDiskSizeGB(), nil, d, connLatest)
if err != nil {
log.Printf("[WARN] Error setting `replication_specs` for the cluster(%s): %s", cluster.GetId(), err)
}

result := map[string]any{
"advanced_configuration": flattenProcessArgs(processArgs),
"backup_enabled": cluster.GetBackupEnabled(),
"bi_connector_config": flattenBiConnectorConfig(cluster.GetBiConnector()),
"bi_connector_config": flattenBiConnectorConfig(convertBiConnectToLatest(cluster.BiConnector)),
"cluster_type": cluster.GetClusterType(),
"create_date": conversion.TimePtrToStringPtr(cluster.CreateDate),
"connection_strings": flattenConnectionStrings(cluster.GetConnectionStrings()),
"connection_strings": flattenConnectionStrings(*convertConnectionStringToLatest(cluster.ConnectionStrings)),
"disk_size_gb": cluster.GetDiskSizeGB(),
"encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(),
"labels": flattenLabels(cluster.GetLabels()),
"labels": flattenLabels(*convertLabelsToLatest(cluster.Labels)),
"tags": conversion.FlattenTags(cluster.GetTags()),
"mongo_db_major_version": cluster.GetMongoDBMajorVersion(),
"mongo_db_version": cluster.GetMongoDBVersion(),