From bfc18aaad345e5a20fd85172d10370ef5e85c134 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 27 Jun 2024 16:13:17 +0200 Subject: [PATCH 01/84] doc: Updates `mongodbatlas_global_cluster_config` doc about self-managed sharding clusters (#2372) * update doc * add link --- .../r/global_cluster_config.html.markdown | 47 +------------------ 1 file changed, 2 insertions(+), 45 deletions(-) diff --git a/website/docs/r/global_cluster_config.html.markdown b/website/docs/r/global_cluster_config.html.markdown index 46fd14fe6c..18fe29f942 100644 --- a/website/docs/r/global_cluster_config.html.markdown +++ b/website/docs/r/global_cluster_config.html.markdown @@ -13,6 +13,8 @@ description: |- -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. +-> **NOTE:** This resource can only be used with Atlas-managed clusters. See doc for `global_cluster_self_managed_sharding` attribute in [`mongodbatlas_advanced_cluster` resource](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster) for more info. + ~> **IMPORTANT:** A Global Cluster Configuration, once created, can only be deleted. You can recreate the Global Cluster with the same data only in the Atlas UI. This is because the configuration and its related collection with shard key and indexes are managed separately and they would end up in an inconsistent state. 
[Read more about Global Cluster Configuration](https://www.mongodb.com/docs/atlas/global-clusters/) ## Examples Usage @@ -72,51 +74,6 @@ description: |- } ``` -### Example Global cluster config - -```terraform -resource "mongodbatlas_cluster" "cluster-test" { - project_id = "" - name = "cluster-test" - - cluster_type = "REPLICASET" - replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - - backup_enabled = true - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - - //Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M40" -} - -resource "mongodbatlas_global_cluster_config" "config" { - project_id = mongodbatlas_cluster.test.project_id - cluster_name = mongodbatlas_cluster.test.name - - managed_namespaces { - db = "mydata" - collection = "publishers" - custom_shard_key = "city" - } - - custom_zone_mappings { - location = "CA" - zone = "Zone 1" - } -} -``` - - ## Argument Reference * `project_id` - (Required) The unique ID for the project to create the database user. From 78e6ebad526d3e450a72990fca83bb70f1d387df Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:41:49 +0200 Subject: [PATCH 02/84] test: Unifies Azure and GCP networking tests (#2371) * unify Azure and GCP tests * TEMPORARY no update * Revert "TEMPORARY no update" This reverts commit ab60d67dece8f53272b2fad4a68b60b890e7636c. 
* run in parallel --- .../resource_network_peering_test.go | 103 ++++-------------- 1 file changed, 23 insertions(+), 80 deletions(-) diff --git a/internal/service/networkpeering/resource_network_peering_test.go b/internal/service/networkpeering/resource_network_peering_test.go index c4e50a456d..03c00ff158 100644 --- a/internal/service/networkpeering/resource_network_peering_test.go +++ b/internal/service/networkpeering/resource_network_peering_test.go @@ -25,44 +25,7 @@ func TestAccNetworkNetworkPeering_basicAWS(t *testing.T) { resource.ParallelTest(t, *basicAWSTestCase(t)) } -func TestAccNetworkRSNetworkPeering_basicAzure(t *testing.T) { - var ( - projectID = acc.ProjectIDExecution(t) - directoryID = os.Getenv("AZURE_DIRECTORY_ID") - subscriptionID = os.Getenv("AZURE_SUBSCRIPTION_ID") - resourceGroupName = os.Getenv("AZURE_RESOURCE_GROUP_NAME") - vNetName = os.Getenv("AZURE_VNET_NAME") - providerName = "AZURE" - ) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t); acc.PreCheckPeeringEnvAzure(t) }, - ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - CheckDestroy: acc.CheckDestroyNetworkPeering, - Steps: []resource.TestStep{ - { - Config: configAzure(projectID, providerName, directoryID, subscriptionID, resourceGroupName, vNetName), - Check: resource.ComposeTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "container_id"), - resource.TestCheckResourceAttr(resourceName, "provider_name", providerName), - resource.TestCheckResourceAttr(resourceName, "vnet_name", vNetName), - resource.TestCheckResourceAttr(resourceName, "azure_directory_id", directoryID), - ), - }, - { - ResourceName: resourceName, - ImportStateIdFunc: importStateIDFunc(resourceName), - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"container_id"}, - }, - }, - }) -} - -func 
TestAccNetworkRSNetworkPeering_updateBasicAzure(t *testing.T) { +func TestAccNetworkRSNetworkPeering_Azure(t *testing.T) { var ( projectID = acc.ProjectIDExecution(t) directoryID = os.Getenv("AZURE_DIRECTORY_ID") @@ -73,7 +36,7 @@ func TestAccNetworkRSNetworkPeering_updateBasicAzure(t *testing.T) { providerName = "AZURE" ) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t); acc.PreCheckPeeringEnvAzure(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyNetworkPeering, @@ -100,52 +63,18 @@ func TestAccNetworkRSNetworkPeering_updateBasicAzure(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "azure_directory_id", directoryID), ), }, - }, - }) -} - -func TestAccNetworkRSNetworkPeering_basicGCP(t *testing.T) { - acc.SkipTestForCI(t) // needs GCP configuration - - var ( - projectID = os.Getenv("MONGODB_ATLAS_PROJECT_ID") - providerName = "GCP" - gcpProjectID = os.Getenv("GCP_PROJECT_ID") - networkName = acc.RandomName() - ) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.PreCheck(t); acc.PreCheckPeeringEnvGCP(t) }, - ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - CheckDestroy: acc.CheckDestroyNetworkPeering, - Steps: []resource.TestStep{ - { - Config: configGCP(projectID, providerName, gcpProjectID, networkName), - Check: resource.ComposeTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "container_id"), - - resource.TestCheckResourceAttr(resourceName, "provider_name", providerName), - resource.TestCheckResourceAttr(resourceName, "gcp_project_id", gcpProjectID), - resource.TestCheckResourceAttr(resourceName, "network_name", networkName), - - // computed values that are obtain from associated container, checks for existing prefix convention to ensure they are gcp related values - 
resource.TestCheckResourceAttrWith(resourceName, "atlas_gcp_project_id", acc.MatchesExpression("p-.*")), - resource.TestCheckResourceAttrWith(resourceName, "atlas_vpc_name", acc.MatchesExpression("nt-.*")), - ), - }, { - ResourceName: resourceName, - ImportStateIdFunc: importStateIDFunc(resourceName), - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportStateIdFunc: importStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"container_id"}, }, }, }) } -func TestAccNetworkRSNetworkPeering_updateBasicGCP(t *testing.T) { +func TestAccNetworkRSNetworkPeering_GCP(t *testing.T) { acc.SkipTestForCI(t) // needs GCP configuration var ( @@ -156,7 +85,7 @@ func TestAccNetworkRSNetworkPeering_updateBasicGCP(t *testing.T) { updatedNetworkName = acc.RandomName() ) - resource.Test(t, resource.TestCase{ + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheck(t); acc.PreCheckPeeringEnvGCP(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyNetworkPeering, @@ -172,6 +101,10 @@ func TestAccNetworkRSNetworkPeering_updateBasicGCP(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "provider_name", providerName), resource.TestCheckResourceAttr(resourceName, "gcp_project_id", gcpProjectID), resource.TestCheckResourceAttr(resourceName, "network_name", networkName), + + // computed values that are obtain from associated container, checks for existing prefix convention to ensure they are gcp related values + resource.TestCheckResourceAttrWith(resourceName, "atlas_gcp_project_id", acc.MatchesExpression("p-.*")), + resource.TestCheckResourceAttrWith(resourceName, "atlas_vpc_name", acc.MatchesExpression("nt-.*")), ), }, { @@ -185,8 +118,18 @@ func TestAccNetworkRSNetworkPeering_updateBasicGCP(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "provider_name", providerName), resource.TestCheckResourceAttr(resourceName, 
"gcp_project_id", gcpProjectID), resource.TestCheckResourceAttr(resourceName, "network_name", updatedNetworkName), + + // computed values that are obtain from associated container, checks for existing prefix convention to ensure they are gcp related values + resource.TestCheckResourceAttrWith(resourceName, "atlas_gcp_project_id", acc.MatchesExpression("p-.*")), + resource.TestCheckResourceAttrWith(resourceName, "atlas_vpc_name", acc.MatchesExpression("nt-.*")), ), }, + { + ResourceName: resourceName, + ImportStateIdFunc: importStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, }, }) } From c647bf248863b9b1494c7f7ed7c253d64b42999b Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Thu, 27 Jun 2024 18:45:41 +0000 Subject: [PATCH 03/84] chore: Updates examples link in index.html.markdown for v1.17.3 release --- website/docs/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 6732407efa..1df9784687 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -226,7 +226,7 @@ We ship binaries but do not prioritize fixes for the following operating system ## Examples from MongoDB and the Community -We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.2/examples) +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.3/examples) in our GitHub repo that will help both beginner and more advanced users. Have a good example you've created and want to share? 
From 1fad9f4a054d0181694f87db13fec983c0346c7d Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Thu, 27 Jun 2024 18:46:09 +0000 Subject: [PATCH 04/84] chore: Updates CHANGELOG.md header for v1.17.3 release --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7be70fce86..a84a5bbe2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ ## (Unreleased) +## 1.17.3 (June 27, 2024) + ## 1.17.2 (June 20, 2024) ENHANCEMENTS: From 45bc5e51c3ab9a6fa53f0ce30b8fc97da9785037 Mon Sep 17 00:00:00 2001 From: svc-apix-Bot <142542575+svc-apix-Bot@users.noreply.github.com> Date: Mon, 1 Jul 2024 11:37:49 +0200 Subject: [PATCH 05/84] doc: Updates Terraform Compatibility Matrix documentation (#2370) Co-authored-by: maastha <122359335+maastha@users.noreply.github.com> --- website/docs/index.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 1df9784687..fcf4c541a7 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -185,6 +185,7 @@ For more information on configuring and managing programmatic API Keys see the [ | HashiCorp Terraform Release | HashiCorp Terraform Release Date | HashiCorp Terraform Full Support End Date | MongoDB Atlas Support End Date | |:-------:|:------------:|:------------:|:------------:| +| 1.9.x | 2024-06-26 | 2026-06-30 | 2026-06-30 | | 1.8.x | 2024-04-10 | 2026-04-30 | 2026-04-30 | | 1.7.x | 2024-01-17 | 2026-01-31 | 2026-01-31 | | 1.6.x | 2023-10-04 | 2025-10-31 | 2025-10-31 | From 7068f3674a0cacb3c013d7a01f364bee5e8d5177 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Mon, 1 Jul 2024 12:59:06 +0200 Subject: [PATCH 06/84] use ComposeAggregateTestCheckFunc (#2375) --- .../provider/provider_authentication_test.go | 2 +- .../data_source_accesslist_api_key_test.go | 2 +- .../data_source_accesslist_api_keys_test.go | 2 +- 
...urce_access_list_api_key_migration_test.go | 6 +- .../resource_access_list_api_key_test.go | 12 +-- .../data_source_alert_configuration_test.go | 8 +- .../data_source_alert_configurations_test.go | 6 +- ...urce_alert_configuration_migration_test.go | 10 +- .../resource_alert_configuration_test.go | 40 ++++---- .../apikey/data_source_api_key_test.go | 2 +- .../apikey/data_source_api_keys_test.go | 2 +- .../apikey/resource_api_key_migration_test.go | 2 +- .../service/apikey/resource_api_key_test.go | 4 +- .../atlasuser/data_source_atlas_user_test.go | 4 +- .../atlasuser/data_source_atlas_users_test.go | 8 +- .../resource_auditing_migration_test.go | 2 +- .../auditing/resource_auditing_test.go | 4 +- .../resource_backup_compliance_policy_test.go | 8 +- ...ce_cloud_backup_schedule_migration_test.go | 2 +- .../resource_cloud_backup_schedule_test.go | 20 ++-- ...ce_cloud_backup_snapshot_migration_test.go | 4 +- .../resource_cloud_backup_snapshot_test.go | 4 +- ...loud_backup_snapshot_export_bucket_test.go | 2 +- ...e_cloud_backup_snapshot_export_job_test.go | 2 +- ..._cloud_backup_snapshot_restore_job_test.go | 4 +- ...loud_provider_access_authorization_test.go | 4 +- ...source_cloud_provider_access_setup_test.go | 4 +- .../service/cluster/resource_cluster_test.go | 92 +++++++++---------- ...luster_outage_simulation_migration_test.go | 4 +- ...resource_cluster_outage_simulation_test.go | 4 +- .../data_source_test.go | 2 +- .../data_source_custom_db_role_test.go | 2 +- .../data_source_custom_db_roles_test.go | 2 +- .../resource_custom_db_role_migration_test.go | 2 +- .../resource_custom_db_role_test.go | 18 ++-- ...stom_dns_configuration_cluster_aws_test.go | 2 +- ...onfiguration_cluster_aws_migration_test.go | 2 +- ...stom_dns_configuration_cluster_aws_test.go | 6 +- .../data_source_database_user_test.go | 2 +- .../data_source_database_users_test.go | 2 +- .../resource_database_user_migration_test.go | 18 ++-- .../resource_database_user_test.go | 38 ++++---- 
...data_source_data_lake_pipeline_run_test.go | 2 +- ...ata_source_data_lake_pipeline_runs_test.go | 2 +- ...ource_data_lake_pipeline_migration_test.go | 2 +- .../resource_data_lake_pipeline_test.go | 2 +- ...ource_encryption_at_rest_migration_test.go | 10 +- .../resource_encryption_at_rest_test.go | 14 +-- .../data_source_event_trigger_test.go | 2 +- .../data_source_event_triggers_test.go | 2 +- .../resource_event_trigger_test.go | 30 +++--- ...source_federated_database_instance_test.go | 2 +- ...ource_federated_database_instances_test.go | 2 +- ...erated_database_instance_migration_test.go | 2 +- ...source_federated_database_instance_test.go | 8 +- .../resource_federated_query_limit_test.go | 2 +- ...derated_settings_identity_provider_test.go | 2 +- ...erated_settings_identity_providers_test.go | 10 +- ...derated_settings_identity_provider_test.go | 6 +- ...e_federated_settings_connected_org_test.go | 2 +- ..._federated_settings_connected_orgs_test.go | 2 +- .../data_source_federated_settings_test.go | 2 +- ...e_federated_settings_connected_org_test.go | 6 +- ...ederated_settings_org_role_mapping_test.go | 4 +- .../data_source_global_cluster_config_test.go | 2 +- ...ce_global_cluster_config_migration_test.go | 2 +- .../resource_global_cluster_config_test.go | 6 +- .../resource_ldap_configuration_test.go | 4 +- .../ldapverify/resource_ldap_verify_test.go | 4 +- .../data_source_maintenance_window_test.go | 2 +- ...ource_maintenance_window_migration_test.go | 2 +- .../resource_maintenance_window_test.go | 10 +- ...source_network_container_migration_test.go | 6 +- .../resource_network_container_test.go | 20 ++-- .../resource_network_peering_test.go | 12 +-- .../resource_online_archive_migration_test.go | 4 +- .../resource_online_archive_test.go | 26 +++--- .../data_source_organization_test.go | 2 +- .../data_source_organizations_test.go | 4 +- .../resource_organization_migration_test.go | 2 +- .../resource_organization_test.go | 10 +- 
.../data_source_org_invitation_test.go | 2 +- .../resource_org_invitation_migration_test.go | 2 +- .../resource_org_invitation_test.go | 4 +- ...rce_private_endpoint_regional_mode_test.go | 8 +- .../data_source_privatelink_endpoint_test.go | 2 +- ...rce_privatelink_endpoint_migration_test.go | 2 +- .../resource_privatelink_endpoint_test.go | 6 +- ...ce_privatelink_endpoint_serverless_test.go | 2 +- ...ource_privatelink_endpoint_service_test.go | 2 +- ...ice_data_federation_online_archive_test.go | 2 +- ...ce_data_federation_online_archives_test.go | 2 +- ...ederation_online_archive_migration_test.go | 2 +- ...ice_data_federation_online_archive_test.go | 10 +- ...point_service_serverless_migration_test.go | 4 +- ...telink_endpoint_service_serverless_test.go | 8 +- .../resource_project_migration_test.go | 12 +-- .../service/project/resource_project_test.go | 40 ++++---- .../resource_project_api_key_test.go | 18 ++-- .../data_source_project_invitation_test.go | 2 +- ...ource_project_invitation_migration_test.go | 2 +- .../resource_project_invitation_test.go | 4 +- ...e_project_ip_access_list_migration_test.go | 6 +- .../resource_project_ip_access_list_test.go | 16 ++-- .../pushbasedlogexport/resource_test.go | 6 +- ...source_search_deployment_migration_test.go | 2 +- .../resource_search_deployment_test.go | 2 +- .../searchindex/resource_search_index_test.go | 22 ++--- .../resource_serverless_instance_test.go | 12 +-- .../data_source_stream_connections_test.go | 2 +- .../resource_stream_connection_test.go | 6 +- .../data_source_stream_instance_test.go | 2 +- .../data_source_stream_instances_test.go | 4 +- .../resource_stream_instance_test.go | 6 +- .../service/team/data_source_team_test.go | 4 +- .../team/resource_team_migration_test.go | 2 +- internal/service/team/resource_team_test.go | 8 +- .../resource_third_party_integration_test.go | 28 +++--- ...entication_database_user_migration_test.go | 4 +- ..._x509_authentication_database_user_test.go | 6 +- 120 files 
changed, 437 insertions(+), 437 deletions(-) diff --git a/internal/provider/provider_authentication_test.go b/internal/provider/provider_authentication_test.go index b779896002..c91510427b 100644 --- a/internal/provider/provider_authentication_test.go +++ b/internal/provider/provider_authentication_test.go @@ -22,7 +22,7 @@ func TestAccSTSAssumeRole_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configProject(orgID, projectName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "cluster_count", "0"), diff --git a/internal/service/accesslistapikey/data_source_accesslist_api_key_test.go b/internal/service/accesslistapikey/data_source_accesslist_api_key_test.go index 72365885c8..a987054317 100644 --- a/internal/service/accesslistapikey/data_source_accesslist_api_key_test.go +++ b/internal/service/accesslistapikey/data_source_accesslist_api_key_test.go @@ -23,7 +23,7 @@ func TestAccConfigDSAccesslistAPIKey_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(orgID, description, ipAddress), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttrSet(resourceName, "ip_address"), diff --git a/internal/service/accesslistapikey/data_source_accesslist_api_keys_test.go b/internal/service/accesslistapikey/data_source_accesslist_api_keys_test.go index a89a075881..82f1d14384 100644 --- a/internal/service/accesslistapikey/data_source_accesslist_api_keys_test.go +++ b/internal/service/accesslistapikey/data_source_accesslist_api_keys_test.go @@ -25,7 +25,7 @@ func TestAccConfigDSAccesslistAPIKeys_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDSPlural(orgID, description, 
ipAddress), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttrSet(resourceName, "ip_address"), diff --git a/internal/service/accesslistapikey/resource_access_list_api_key_migration_test.go b/internal/service/accesslistapikey/resource_access_list_api_key_migration_test.go index db2510acfb..227fa37c7f 100644 --- a/internal/service/accesslistapikey/resource_access_list_api_key_migration_test.go +++ b/internal/service/accesslistapikey/resource_access_list_api_key_migration_test.go @@ -26,7 +26,7 @@ func TestMigProjectAccesslistAPIKey_SettingIPAddress(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: configWithIPAddress(orgID, description, ipAddress), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "ip_address", ipAddress), @@ -54,7 +54,7 @@ func TestMigProjectAccesslistAPIKey_SettingCIDRBlock(t *testing.T) { { ExternalProviders: acc.ExternalProviders("1.14.0"), Config: configWithCIDRBlock(orgID, description, cidrBlock), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cidr_block", cidrBlock), @@ -92,7 +92,7 @@ func TestMigProjectAccesslistAPIKey_SettingCIDRBlock_WideCIDR_SDKMigration(t *te { ExternalProviders: acc.ExternalProviders("1.14.0"), Config: configWithCIDRBlock(orgID, description, cidrBlock), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cidr_block", cidrBlock), diff 
--git a/internal/service/accesslistapikey/resource_access_list_api_key_test.go b/internal/service/accesslistapikey/resource_access_list_api_key_test.go index 887736fd40..a070953f2a 100644 --- a/internal/service/accesslistapikey/resource_access_list_api_key_test.go +++ b/internal/service/accesslistapikey/resource_access_list_api_key_test.go @@ -28,7 +28,7 @@ func TestAccProjectRSAccesslistAPIKey_SettingIPAddress(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithIPAddress(orgID, description, ipAddress), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "ip_address", ipAddress), @@ -36,7 +36,7 @@ func TestAccProjectRSAccesslistAPIKey_SettingIPAddress(t *testing.T) { }, { Config: configWithIPAddress(orgID, description, updatedIPAddress), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "ip_address", updatedIPAddress), @@ -68,7 +68,7 @@ func TestAccProjectRSAccessListAPIKey_SettingCIDRBlock(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithCIDRBlock(orgID, description, cidrBlock), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cidr_block", cidrBlock), @@ -76,7 +76,7 @@ func TestAccProjectRSAccessListAPIKey_SettingCIDRBlock(t *testing.T) { }, { Config: configWithCIDRBlock(orgID, description, updatedCIDRBlock), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), 
resource.TestCheckResourceAttr(resourceName, "cidr_block", updatedCIDRBlock), @@ -102,7 +102,7 @@ func TestAccProjectRSAccessListAPIKey_SettingCIDRBlock_WideCIDR(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithCIDRBlock(orgID, description, cidrBlock), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cidr_block", cidrBlock), @@ -110,7 +110,7 @@ func TestAccProjectRSAccessListAPIKey_SettingCIDRBlock_WideCIDR(t *testing.T) { }, { Config: configWithCIDRBlock(orgID, description, updatedCIDRBlock), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cidr_block", updatedCIDRBlock), diff --git a/internal/service/alertconfiguration/data_source_alert_configuration_test.go b/internal/service/alertconfiguration/data_source_alert_configuration_test.go index edf53b99f4..8c32684ad7 100644 --- a/internal/service/alertconfiguration/data_source_alert_configuration_test.go +++ b/internal/service/alertconfiguration/data_source_alert_configuration_test.go @@ -21,7 +21,7 @@ func TestAccConfigDSAlertConfiguration_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicDS(projectID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "project_id", projectID), resource.TestCheckResourceAttr(dataSourceName, "notification.#", "1"), @@ -47,7 +47,7 @@ func TestAccConfigDSAlertConfiguration_withThreshold(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithThreshold(projectID, true, 1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( 
checkExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "project_id", projectID), resource.TestCheckResourceAttr(dataSourceName, "notification.#", "1"), @@ -73,7 +73,7 @@ func TestAccConfigDSAlertConfiguration_withOutput(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithOutputs(projectID, outputLabel), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "project_id", projectID), resource.TestCheckResourceAttr(dataSourceName, "notification.#", "1"), @@ -102,7 +102,7 @@ func TestAccConfigDSAlertConfiguration_withPagerDuty(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithPagerDutyDS(projectID, serviceKey, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "project_id", projectID), ), diff --git a/internal/service/alertconfiguration/data_source_alert_configurations_test.go b/internal/service/alertconfiguration/data_source_alert_configurations_test.go index c725a7ec5e..61bb45de90 100644 --- a/internal/service/alertconfiguration/data_source_alert_configurations_test.go +++ b/internal/service/alertconfiguration/data_source_alert_configurations_test.go @@ -26,7 +26,7 @@ func TestAccConfigDSAlertConfigurations_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicPluralDS(projectID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkCount(dataSourcePluralName), resource.TestCheckResourceAttr(dataSourcePluralName, "project_id", projectID), resource.TestCheckNoResourceAttr(dataSourcePluralName, "total_count"), @@ -48,7 +48,7 @@ func TestAccConfigDSAlertConfigurations_withOutputTypes(t *testing.T) { Steps: []resource.TestStep{ { Config: configOutputType(projectID, outputTypes), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkCount(dataSourcePluralName), resource.TestCheckResourceAttr(dataSourcePluralName, "project_id", projectID), resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.output.#", "2"), @@ -86,7 +86,7 @@ func TestAccConfigDSAlertConfigurations_totalCount(t *testing.T) { Steps: []resource.TestStep{ { Config: configTotalCount(projectID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkCount(dataSourcePluralName), resource.TestCheckResourceAttr(dataSourcePluralName, "project_id", projectID), resource.TestCheckResourceAttrSet(dataSourcePluralName, "total_count"), diff --git a/internal/service/alertconfiguration/resource_alert_configuration_migration_test.go b/internal/service/alertconfiguration/resource_alert_configuration_migration_test.go index 150d49bee5..3c5320e8f6 100644 --- a/internal/service/alertconfiguration/resource_alert_configuration_migration_test.go +++ b/internal/service/alertconfiguration/resource_alert_configuration_migration_test.go @@ -21,7 +21,7 @@ func TestMigConfigRSAlertConfiguration_withNotificationsMetricThreshold(t *testi { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "2"), @@ -45,7 +45,7 @@ func TestMigConfigRSAlertConfiguration_withThreshold(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), @@ -71,7 +71,7 @@ func TestMigConfigRSAlertConfiguration_withEmptyOptionalBlocks(t *testing.T) { { ExternalProviders: 
mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), @@ -108,7 +108,7 @@ func TestMigConfigRSAlertConfiguration_withMultipleMatchers(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "matcher.#", "2"), @@ -132,7 +132,7 @@ func TestMigConfigRSAlertConfiguration_withEmptyOptionalAttributes(t *testing.T) { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), diff --git a/internal/service/alertconfiguration/resource_alert_configuration_test.go b/internal/service/alertconfiguration/resource_alert_configuration_test.go index 7dd80a1b1c..6f32d1a295 100644 --- a/internal/service/alertconfiguration/resource_alert_configuration_test.go +++ b/internal/service/alertconfiguration/resource_alert_configuration_test.go @@ -35,7 +35,7 @@ func TestAccConfigRSAlertConfiguration_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicRS(projectID, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "2"), @@ -43,7 +43,7 @@ func TestAccConfigRSAlertConfiguration_basic(t *testing.T) { }, { Config: 
configBasicRS(projectID, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "2"), @@ -93,7 +93,7 @@ func TestAccConfigRSAlertConfiguration_withEmptyMatcherMetricThresholdConfig(t * Steps: []resource.TestStep{ { Config: configWithEmptyMatcherMetricThresholdConfig(projectID, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.#", "1"), @@ -116,14 +116,14 @@ func TestAccConfigRSAlertConfiguration_withNotifications(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithNotifications(projectID, true, true, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configWithNotifications(projectID, false, false, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -163,7 +163,7 @@ func TestAccConfigRSAlertConfiguration_withMatchers(t *testing.T) { "operator": "CONTAINS", "value": "MONGOS", }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -180,7 +180,7 @@ func TestAccConfigRSAlertConfiguration_withMatchers(t *testing.T) { "operator": "EQUALS", "value": "PRIMARY", }), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -203,14 +203,14 @@ func TestAccConfigRSAlertConfiguration_withMetricUpdated(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithMetricUpdated(projectID, true, 99.0), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configWithMetricUpdated(projectID, false, 89.7), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -233,14 +233,14 @@ func TestAccConfigRSAlertConfiguration_withThresholdUpdated(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithThresholdUpdated(projectID, true, 1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configWithThresholdUpdated(projectID, false, 3), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -270,7 +270,7 @@ func TestAccConfigRSAlertConfiguration_withoutRoles(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithoutRoles(projectID, true, 99.0), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -293,7 +293,7 @@ func TestAccConfigRSAlertConfiguration_withoutOptionalAttributes(t *testing.T) { Steps: 
[]resource.TestStep{ { Config: configWithEmptyOptionalAttributes(projectID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -347,7 +347,7 @@ func TestAccConfigRSAlertConfiguration_updatePagerDutyWithNotifierId(t *testing. Steps: []resource.TestStep{ { Config: configWithPagerDutyNotifierID(projectID, notifierID, 10, &serviceKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.0.delay_min", "10"), @@ -356,7 +356,7 @@ func TestAccConfigRSAlertConfiguration_updatePagerDutyWithNotifierId(t *testing. }, { Config: configWithPagerDutyNotifierID(projectID, notifierID, 15, nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "notification.0.delay_min", "15"), @@ -382,7 +382,7 @@ func TestAccConfigRSAlertConfiguration_withDataDog(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithDataDog(projectID, ddAPIKey, ddRegion, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -406,7 +406,7 @@ func TestAccConfigRSAlertConfiguration_withPagerDuty(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithPagerDuty(projectID, serviceKey, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), 
resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -437,7 +437,7 @@ func TestAccConfigAlertConfiguration_PagerDutyUsingIntegrationID(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithPagerDutyIntegrationID(orgID, projectName, serviceKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(nil, resourceName), resource.TestCheckResourceAttrSet(resourceName, "notification.0.integration_id"), resource.TestCheckResourceAttrSet(dataSourceName, "notification.0.integration_id"), @@ -462,7 +462,7 @@ func TestAccConfigRSAlertConfiguration_withOpsGenie(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithOpsGenie(projectID, apiKey, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -486,7 +486,7 @@ func TestAccConfigRSAlertConfiguration_withVictorOps(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithVictorOps(projectID, apiKey, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsUsingProxy(proxyPort, resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), diff --git a/internal/service/apikey/data_source_api_key_test.go b/internal/service/apikey/data_source_api_key_test.go index fe8fc05fb6..e9c9922d3b 100644 --- a/internal/service/apikey/data_source_api_key_test.go +++ b/internal/service/apikey/data_source_api_key_test.go @@ -25,7 +25,7 @@ func TestAccConfigDSAPIKey_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(orgID, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "description", 
description), diff --git a/internal/service/apikey/data_source_api_keys_test.go b/internal/service/apikey/data_source_api_keys_test.go index fab057922c..7b5c33b7b0 100644 --- a/internal/service/apikey/data_source_api_keys_test.go +++ b/internal/service/apikey/data_source_api_keys_test.go @@ -25,7 +25,7 @@ func TestAccConfigDSAPIKeys_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDSPlural(orgID, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "description", description), diff --git a/internal/service/apikey/resource_api_key_migration_test.go b/internal/service/apikey/resource_api_key_migration_test.go index 6fa43504e6..568dc81d0f 100644 --- a/internal/service/apikey/resource_api_key_migration_test.go +++ b/internal/service/apikey/resource_api_key_migration_test.go @@ -25,7 +25,7 @@ func TestMigConfigAPIKey_basic(t *testing.T) { { Config: config, ExternalProviders: mig.ExternalProviders(), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "description", description), diff --git a/internal/service/apikey/resource_api_key_test.go b/internal/service/apikey/resource_api_key_test.go index b9e4983401..9d860fd3e5 100644 --- a/internal/service/apikey/resource_api_key_test.go +++ b/internal/service/apikey/resource_api_key_test.go @@ -29,7 +29,7 @@ func TestAccConfigRSAPIKey_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), 
resource.TestCheckResourceAttr(resourceName, "description", description), @@ -37,7 +37,7 @@ func TestAccConfigRSAPIKey_basic(t *testing.T) { }, { Config: configBasic(orgID, descriptionUpdate, roleNameUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "description", descriptionUpdate), diff --git a/internal/service/atlasuser/data_source_atlas_user_test.go b/internal/service/atlasuser/data_source_atlas_user_test.go index 4731a1fe47..bb5a8a676f 100644 --- a/internal/service/atlasuser/data_source_atlas_user_test.go +++ b/internal/service/atlasuser/data_source_atlas_user_test.go @@ -26,7 +26,7 @@ func TestAccConfigDSAtlasUser_ByUserID(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDSMongoDBAtlasUserByUserID(userID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( dataSourceChecksForUser(dataSourceName, "", user)..., ), }, @@ -47,7 +47,7 @@ func TestAccConfigDSAtlasUser_ByUsername(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDSMongoDBAtlasUserByUsername(username), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( dataSourceChecksForUser(dataSourceName, "", user)..., ), }, diff --git a/internal/service/atlasuser/data_source_atlas_users_test.go b/internal/service/atlasuser/data_source_atlas_users_test.go index cd602c4339..405ee22c19 100644 --- a/internal/service/atlasuser/data_source_atlas_users_test.go +++ b/internal/service/atlasuser/data_source_atlas_users_test.go @@ -30,7 +30,7 @@ func TestAccConfigDSAtlasUsers_ByOrgID(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDSMongoDBAtlasUsersByOrgID(orgID), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) @@ -51,7 +51,7 @@ func 
TestAccConfigDSAtlasUsers_ByProjectID(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDSMongoDBAtlasUsersByProjectID(projectName, orgID, projectOwnerID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttr(dataSourceName, "total_count", "1"), resource.TestCheckResourceAttr(dataSourceName, "results.#", "1"), // we know project will only have the project owner @@ -82,7 +82,7 @@ func TestAccConfigDSAtlasUsers_ByTeamID(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDSMongoDBAtlasUsersByTeamID(orgID, teamName, username), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "team_id"), resource.TestCheckResourceAttr(dataSourceName, "org_id", orgID), resource.TestCheckResourceAttr(dataSourceName, "total_count", "1"), @@ -116,7 +116,7 @@ func TestAccConfigDSAtlasUsers_UsingPagination(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDSMongoDBAtlasUsersByTeamWithPagination(orgID, teamName, username, itemsPerPage, pageNum), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "team_id"), resource.TestCheckResourceAttr(dataSourceName, "org_id", orgID), resource.TestCheckResourceAttr(dataSourceName, "total_count", "1"), diff --git a/internal/service/auditing/resource_auditing_migration_test.go b/internal/service/auditing/resource_auditing_migration_test.go index a13b0c034a..fc4fc7d3b4 100644 --- a/internal/service/auditing/resource_auditing_migration_test.go +++ b/internal/service/auditing/resource_auditing_migration_test.go @@ -23,7 +23,7 @@ func TestMigGenericAuditing_basic(t *testing.T) { { Config: config, ExternalProviders: mig.ExternalProviders(), - Check: resource.ComposeTestCheckFunc(checks(auditFilter, true, true)...), + Check: 
resource.ComposeAggregateTestCheckFunc(checks(auditFilter, true, true)...), }, mig.TestStepCheckEmptyPlan(config), }, diff --git a/internal/service/auditing/resource_auditing_test.go b/internal/service/auditing/resource_auditing_test.go index 49f7e9716c..100b80afc8 100644 --- a/internal/service/auditing/resource_auditing_test.go +++ b/internal/service/auditing/resource_auditing_test.go @@ -30,11 +30,11 @@ func TestAccGenericAuditing_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, auditFilter, true, true), - Check: resource.ComposeTestCheckFunc(checks(auditFilter, true, true)...), + Check: resource.ComposeAggregateTestCheckFunc(checks(auditFilter, true, true)...), }, { Config: configBasic(projectID, "{}", false, false), - Check: resource.ComposeTestCheckFunc(checks("{}", false, false)...), + Check: resource.ComposeAggregateTestCheckFunc(checks("{}", false, false)...), }, { ResourceName: resourceName, diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go index a198c7268d..bd0c81dc53 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go @@ -35,7 +35,7 @@ func TestAccBackupCompliancePolicy_update(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithoutOptionals(projectName, orgID, projectOwnerID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "authorized_user_first_name", "First"), resource.TestCheckResourceAttr(resourceName, "authorized_user_last_name", "Last"), @@ -46,7 +46,7 @@ func TestAccBackupCompliancePolicy_update(t *testing.T) { }, { Config: configBasic(projectName, orgID, projectOwnerID, true), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "authorized_user_first_name", "First"), resource.TestCheckResourceAttr(resourceName, "authorized_user_last_name", "Last"), @@ -91,7 +91,7 @@ func TestAccBackupCompliancePolicy_withoutRestoreWindowDays(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithoutRestoreDays(projectName, orgID, projectOwnerID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "copy_protection_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "encryption_at_rest_enabled", "false"), @@ -116,7 +116,7 @@ func basicTestCase(tb testing.TB, useYearly bool) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectName, orgID, projectOwnerID, useYearly), - Check: resource.ComposeTestCheckFunc(basicChecks()...), + Check: resource.ComposeAggregateTestCheckFunc(basicChecks()...), }, { ResourceName: resourceName, diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 55d991a9c8..a584571e79 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -28,7 +28,7 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go 
b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index d3cd652f2b..9bb7898bb5 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -34,7 +34,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), @@ -62,7 +62,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), }, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), @@ -105,7 +105,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "false"), @@ -165,7 +165,7 @@ func TestAccBackupRSCloudBackupSchedule_export(t *testing.T) { Steps: []resource.TestStep{ { Config: configExportPolicies(&clusterInfo, policyName, roleName, bucketName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, 
"cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "true"), @@ -197,7 +197,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), @@ -231,7 +231,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), @@ -267,7 +267,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), @@ -317,7 +317,7 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), @@ -371,7 +371,7 @@ func 
TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { RetentionUnit: "days", RetentionValue: 1, }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "1"), @@ -384,7 +384,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { RetentionUnit: "days", RetentionValue: 3, }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "2"), diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go index 3ceff53768..f15e1c1f93 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go @@ -23,7 +23,7 @@ func TestMigBackupRSCloudBackupSnapshot_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "type", "replicaSet"), @@ -57,7 +57,7 @@ func TestMigBackupRSCloudBackupSnapshot_sharded(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "type", 
"shardedCluster"), diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go index 65015afc3e..b93c361c65 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go @@ -32,7 +32,7 @@ func TestAccBackupRSCloudBackupSnapshot_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(&clusterInfo, description, retentionInDays), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "type", "replicaSet"), @@ -81,7 +81,7 @@ func TestAccBackupRSCloudBackupSnapshot_sharded(t *testing.T) { Steps: []resource.TestStep{ { Config: configSharded(projectID, clusterName, description, retentionInDays), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "type", "shardedCluster"), diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go index 55e8b50f0d..e2ba34e1c3 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go @@ -40,7 +40,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, bucketName, policyName, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( 
checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "bucket_name", bucketName), diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go index c5ee88466d..4b451363e5 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go @@ -59,7 +59,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, bucketName, roleName, policyName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { ResourceName: resourceName, diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index 7418f22183..188b026b33 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -38,7 +38,7 @@ func TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { Steps: []resource.TestStep{ { Config: configDownload(projectID, clusterName, description, retentionInDays, useSnapshotID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.download", "true"), ), @@ -70,7 +70,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: 
configBasic(projectID, clusterName, description, retentionInDays), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.automated", "true"), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.target_cluster_name", clusterName), diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization_test.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization_test.go index a567d1be78..eb614d68b9 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization_test.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization_test.go @@ -59,7 +59,7 @@ func basicAuthorizationTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configAuthorizationAWS(projectID, policyName, roleName, federatedDatabaseInstanceName, testS3Bucket), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttrSet(resourceName, "role_id"), @@ -69,7 +69,7 @@ func basicAuthorizationTestCase(tb testing.TB) *resource.TestCase { }, { Config: configAuthorizationAWS(projectID, policyName, roleNameUpdated, federatedDatabaseInstanceName, testS3Bucket), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttrSet(resourceName, "role_id"), diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup_test.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup_test.go index 04efe80e85..29023957ef 100644 --- 
a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup_test.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup_test.go @@ -33,7 +33,7 @@ func TestAccCloudProviderAccessSetupAzure_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configSetupAzure(projectID, atlasAzureAppID, servicePrincipalID, tenantID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "role_id"), resource.TestCheckResourceAttrSet(resourceName, "azure_config.0.atlas_azure_app_id"), @@ -70,7 +70,7 @@ func basicSetupTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configSetupAWS(projectID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // same as regular cloud resource checkExists(resourceName), resource.TestCheckResourceAttrSet(dataSourceName, "aws_config.0.atlas_assumed_role_external_id"), diff --git a/internal/service/cluster/resource_cluster_test.go b/internal/service/cluster/resource_cluster_test.go index 1c2a0f6f7e..50dedc053c 100644 --- a/internal/service/cluster/resource_cluster_test.go +++ b/internal/service/cluster/resource_cluster_test.go @@ -42,7 +42,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configAWS(projectID, clusterName, true, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -64,7 +64,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { }, { Config: configAWS(projectID, clusterName, false, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, 
"project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -116,7 +116,7 @@ func partialAdvancedConfTestCase(tb testing.TB) *resource.TestCase { SampleSizeBIConnector: conversion.Pointer[int64](110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.fail_index_key_too_long", "false"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.javascript_enabled", "true"), @@ -141,7 +141,7 @@ func partialAdvancedConfTestCase(tb testing.TB) *resource.TestCase { Config: configAdvancedConfPartial(projectID, clusterName, "false", &matlas.ProcessArgs{ MinimumEnabledTLSProtocol: "TLS1_2", }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.fail_index_key_too_long", "false"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.javascript_enabled", "true"), @@ -180,7 +180,7 @@ func TestAccCluster_basic_DefaultWriteRead_AdvancedConf(t *testing.T) { SampleSizeBIConnector: conversion.Pointer[int64](110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.default_read_concern", "available"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.default_write_concern", "1"), @@ -196,7 +196,7 @@ func TestAccCluster_basic_DefaultWriteRead_AdvancedConf(t *testing.T) { Config: configAdvancedConfPartialDefault(projectID, clusterName, "false", &matlas.ProcessArgs{ MinimumEnabledTLSProtocol: "TLS1_2", }), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.default_read_concern", "available"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.default_write_concern", "1"), @@ -239,7 +239,7 @@ func TestAccCluster_emptyAdvancedConf(t *testing.T) { SampleSizeBIConnector: conversion.Pointer[int64](110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.fail_index_key_too_long", "false"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.javascript_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.minimum_enabled_tls_protocol", "TLS1_1"), @@ -276,7 +276,7 @@ func TestAccCluster_basicAdvancedConf(t *testing.T) { SampleSizeBIConnector: conversion.Pointer[int64](110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.fail_index_key_too_long", "false"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.javascript_enabled", "true"), @@ -299,7 +299,7 @@ func TestAccCluster_basicAdvancedConf(t *testing.T) { SampleSizeBIConnector: conversion.Pointer[int64](0), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](60), }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.fail_index_key_too_long", "false"), resource.TestCheckResourceAttr(resourceName, "advanced_configuration.0.javascript_enabled", "false"), @@ -330,7 +330,7 @@ func TestAccCluster_basicAzure(t *testing.T) { Steps: 
[]resource.TestStep{ { Config: configAzure(orgID, projectName, clusterName, "true", "M30", true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -341,7 +341,7 @@ func TestAccCluster_basicAzure(t *testing.T) { }, { Config: configAzure(orgID, projectName, clusterName, "false", "M30", true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -369,7 +369,7 @@ func TestAccCluster_basicGCP(t *testing.T) { Steps: []resource.TestStep{ { Config: configGCP(orgID, projectName, clusterName, "true"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -381,7 +381,7 @@ func TestAccCluster_basicGCP(t *testing.T) { }, { Config: configGCP(orgID, projectName, clusterName, "false"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -410,7 +410,7 @@ func TestAccCluster_WithBiConnectorGCP(t *testing.T) { Steps: []resource.TestStep{ { Config: configGCPWithBiConnector(orgID, projectName, clusterName, "true", false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -423,7 +423,7 @@ func 
TestAccCluster_WithBiConnectorGCP(t *testing.T) { }, { Config: configGCPWithBiConnector(orgID, projectName, clusterName, "false", true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -479,7 +479,7 @@ func TestAccCluster_MultiRegion(t *testing.T) { Steps: []resource.TestStep{ { Config: configMultiRegion(orgID, projectName, clusterName, "true", createRegionsConfig), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -494,7 +494,7 @@ func TestAccCluster_MultiRegion(t *testing.T) { }, { Config: configMultiRegion(orgID, projectName, clusterName, "false", updatedRegionsConfig), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -549,7 +549,7 @@ func TestAccCluster_ProviderRegionName(t *testing.T) { }, { Config: configSingleRegionWithProviderRegionName(orgID, projectName, clusterName, "false"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -564,7 +564,7 @@ func TestAccCluster_ProviderRegionName(t *testing.T) { }, { Config: configMultiRegion(orgID, projectName, clusterName, "false", updatedRegionsConfig), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), 
resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -604,7 +604,7 @@ func TestAccCluster_Global(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigClusterGlobal(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -637,7 +637,7 @@ func TestAccCluster_AWSWithLabels(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasClusterAWSConfigdWithLabels(projectID, clusterName, "false", "M10", "US_WEST_2", []matlas.Label{}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -663,7 +663,7 @@ func TestAccCluster_AWSWithLabels(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -685,7 +685,7 @@ func TestAccCluster_AWSWithLabels(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -713,7 +713,7 @@ func TestAccCluster_WithTags(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithTags(orgID, projectName, clusterName, "false", "M10", "US_WEST_2", []matlas.Tag{}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), 
resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -736,7 +736,7 @@ func TestAccCluster_WithTags(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -761,7 +761,7 @@ func TestAccCluster_WithTags(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -805,7 +805,7 @@ func TestAccCluster_withPrivateEndpointLink(t *testing.T) { { Config: configWithPrivateEndpointLink( awsAccessKey, awsSecretKey, projectID, providerName, region, vpcID, subnetID, securityGroupID, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), ), @@ -839,7 +839,7 @@ func TestAccCluster_withAzureNetworkPeering(t *testing.T) { Steps: []resource.TestStep{ { Config: configAzureWithNetworkPeering(projectID, providerName, directoryID, subcrptionID, resourceGroupName, vNetName, clusterName, atlasCidrBlock, region), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "name"), @@ -869,7 +869,7 @@ func TestAccCluster_withGCPNetworkPeering(t *testing.T) { Steps: []resource.TestStep{ { Config: configGCPWithNetworkPeering(gcpProjectID, gcpRegion, projectID, providerName, gcpPeeringName, clusterName, gcpClusterRegion), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), 
resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -904,7 +904,7 @@ func TestAccCluster_withAzureAndContainerID(t *testing.T) { Steps: []resource.TestStep{ { Config: configAzureWithContainerID(projectID, clusterName, providerName, region, directoryID, subcrptionID, resourceGroupName, vNetName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "name"), resource.TestCheckResourceAttrSet(resourceName, "container_id"), @@ -935,7 +935,7 @@ func TestAccCluster_withAWSAndContainerID(t *testing.T) { Steps: []resource.TestStep{ { Config: configAWSWithContainerID(awsAccessKey, awsSecretKey, projectID, clusterName, providerName, awsRegion, vpcCIDRBlock, awsAccountID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "name"), resource.TestCheckResourceAttrSet(resourceName, "container_id"), @@ -965,7 +965,7 @@ func TestAccCluster_withGCPAndContainerID(t *testing.T) { Steps: []resource.TestStep{ { Config: configGCPWithContainerID(gcpProjectID, gcpRegion, projectID, clusterName, providerName, gcpClusterRegion, gcpPeeringName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), resource.TestCheckResourceAttr(resourceName, "disk_size_gb", "5"), @@ -999,7 +999,7 @@ func TestAccCluster_withAutoScalingAWS(t *testing.T) { Steps: []resource.TestStep{ { Config: configAWSWithAutoscaling(projectID, clusterName, "true", "false", "true", "false", minSize, maxSize, instanceSize), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1018,7 +1018,7 @@ func TestAccCluster_withAutoScalingAWS(t *testing.T) { }, { Config: configAWSWithAutoscaling(projectID, clusterName, "false", "true", "true", "true", minSizeUpdated, maxSizeUpdated, instanceSizeUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1049,7 +1049,7 @@ func TestAccCluster_tenant(t *testing.T) { Steps: []resource.TestStep{ { Config: configTenant(orgID, projectName, clusterName, "M2", "2", dbMajorVersion), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1059,7 +1059,7 @@ func TestAccCluster_tenant(t *testing.T) { }, { Config: configTenantUpdated(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1087,7 +1087,7 @@ func TestAccCluster_tenant_m5(t *testing.T) { Steps: []resource.TestStep{ { Config: configTenant(orgID, projectName, clusterName, "M5", "5", dbMajorVersion), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1114,7 +1114,7 @@ func TestAccCluster_basicGCPRegionNameWesternUS(t *testing.T) { Steps: 
[]resource.TestStep{ { Config: configGCPRegionName(orgID, projectName, clusterName, regionName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), resource.TestCheckResourceAttr(resourceName, "provider_region_name", regionName), @@ -1139,7 +1139,7 @@ func TestAccCluster_basicGCPRegionNameUSWest2(t *testing.T) { Steps: []resource.TestStep{ { Config: configGCPRegionName(orgID, projectName, clusterName, regionName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), resource.TestCheckResourceAttr(resourceName, "provider_region_name", regionName), @@ -1238,7 +1238,7 @@ func TestAccCluster_RegionsConfig(t *testing.T) { Steps: []resource.TestStep{ { Config: configRegions(orgID, projectName, clusterName, replications), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), resource.TestCheckResourceAttr(resourceName, "replication_specs.#", "3"), @@ -1246,7 +1246,7 @@ func TestAccCluster_RegionsConfig(t *testing.T) { }, { Config: configRegions(orgID, projectName, clusterName, replicationsUpdate), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), resource.TestCheckResourceAttr(resourceName, "replication_specs.#", "2"), @@ -1254,7 +1254,7 @@ func TestAccCluster_RegionsConfig(t *testing.T) { }, { Config: configRegions(orgID, projectName, clusterName, replicationsShardsUpdate), - Check: resource.ComposeTestCheckFunc( + 
Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), resource.TestCheckResourceAttr(resourceName, "replication_specs.#", "2"), @@ -1280,7 +1280,7 @@ func TestAccCluster_basicAWS_UnpauseToPaused(t *testing.T) { Steps: []resource.TestStep{ { Config: configAWSPaused(projectID, clusterName, true, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1293,7 +1293,7 @@ func TestAccCluster_basicAWS_UnpauseToPaused(t *testing.T) { }, { Config: configAWSPaused(projectID, clusterName, false, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1328,7 +1328,7 @@ func TestAccCluster_basicAWS_PausedToUnpaused(t *testing.T) { Steps: []resource.TestStep{ { Config: configAWSPaused(projectID, clusterName, true, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), @@ -1341,7 +1341,7 @@ func TestAccCluster_basicAWS_PausedToUnpaused(t *testing.T) { }, { Config: configAWSPaused(projectID, clusterName, false, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", clusterName), diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go 
b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go index c58d288034..3a9cc5f190 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go @@ -22,7 +22,7 @@ func TestMigOutageSimulationCluster_SingleRegion_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "outage_filters.#"), @@ -50,7 +50,7 @@ func TestMigOutageSimulationCluster_MultiRegion_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "outage_filters.#"), diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go index 60a2d9f9fc..a1224b620e 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go @@ -29,7 +29,7 @@ func TestAccOutageSimulationCluster_SingleRegion_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configSingleRegion(projectID, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), 
resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "outage_filters.#"), @@ -62,7 +62,7 @@ func TestAccOutageSimulationCluster_MultiRegion_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configMultiRegion(projectID, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "outage_filters.#"), diff --git a/internal/service/controlplaneipaddresses/data_source_test.go b/internal/service/controlplaneipaddresses/data_source_test.go index 5058af3f9f..5cd421da6c 100644 --- a/internal/service/controlplaneipaddresses/data_source_test.go +++ b/internal/service/controlplaneipaddresses/data_source_test.go @@ -15,7 +15,7 @@ func TestAccControlPlaneIpAddressesDS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrWith(dataSourceName, "outbound.aws.us-east-1.0", acc.CIDRBlockExpression()), ), }, diff --git a/internal/service/customdbrole/data_source_custom_db_role_test.go b/internal/service/customdbrole/data_source_custom_db_role_test.go index cd6ee894b4..9fe37d7098 100644 --- a/internal/service/customdbrole/data_source_custom_db_role_test.go +++ b/internal/service/customdbrole/data_source_custom_db_role_test.go @@ -26,7 +26,7 @@ func TestAccConfigDSCustomDBRole_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(orgID, projectName, roleName, "INSERT", databaseName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // Test for Resource checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), diff --git 
a/internal/service/customdbrole/data_source_custom_db_roles_test.go b/internal/service/customdbrole/data_source_custom_db_roles_test.go index 43f2020357..5550d7560b 100644 --- a/internal/service/customdbrole/data_source_custom_db_roles_test.go +++ b/internal/service/customdbrole/data_source_custom_db_roles_test.go @@ -26,7 +26,7 @@ func TestAccConfigDSCustomDBRoles_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDSPlural(orgID, projectName, roleName, "INSERT", databaseName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // Test for Resource checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), diff --git a/internal/service/customdbrole/resource_custom_db_role_migration_test.go b/internal/service/customdbrole/resource_custom_db_role_migration_test.go index cc67a1058f..3adb574d31 100644 --- a/internal/service/customdbrole/resource_custom_db_role_migration_test.go +++ b/internal/service/customdbrole/resource_custom_db_role_migration_test.go @@ -25,7 +25,7 @@ func TestMigConfigCustomDBRoles_Basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "role_name", roleName), diff --git a/internal/service/customdbrole/resource_custom_db_role_test.go b/internal/service/customdbrole/resource_custom_db_role_test.go index 87a67c032c..af2e6282b6 100644 --- a/internal/service/customdbrole/resource_custom_db_role_test.go +++ b/internal/service/customdbrole/resource_custom_db_role_test.go @@ -32,7 +32,7 @@ func TestAccConfigRSCustomDBRoles_Basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, roleName, "INSERT", databaseName1), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "role_name", roleName), @@ -43,7 +43,7 @@ func TestAccConfigRSCustomDBRoles_Basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, roleName, "UPDATE", databaseName2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "role_name", roleName), @@ -141,7 +141,7 @@ func TestAccConfigRSCustomDBRoles_WithInheritedRoles(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithInheritedRoles(orgID, projectName, inheritRole, testRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // For Inherited Roles // inherited Role [0] @@ -172,7 +172,7 @@ func TestAccConfigRSCustomDBRoles_WithInheritedRoles(t *testing.T) { }, { Config: configWithInheritedRoles(orgID, projectName, inheritRoleUpdated, testRoleUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // For Inherited Role // inherited Role [0] @@ -332,7 +332,7 @@ func TestAccConfigRSCustomDBRoles_MultipleCustomRoles(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithMultiple(orgID, projectName, inheritRole, testRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // For Inherited Role checkExists(InheritedRoleResourceName), @@ -353,7 +353,7 @@ func TestAccConfigRSCustomDBRoles_MultipleCustomRoles(t *testing.T) { }, { Config: configWithMultiple(orgID, projectName, inheritRoleUpdated, testRoleUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // For Inherited Role checkExists(InheritedRoleResourceName), @@ -393,7 +393,7 @@ func TestAccConfigRSCustomDBRoles_MultipleResources(t 
*testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, roleName, "INSERT", acc.RandomClusterName()), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "role_name", roleName), @@ -494,7 +494,7 @@ func TestAccConfigRSCustomDBRoles_UpdatedInheritRoles(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithMultiple(orgID, projectName, inheritRole, testRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // For Inherited Role checkExists(InheritedRoleResourceName), @@ -514,7 +514,7 @@ func TestAccConfigRSCustomDBRoles_UpdatedInheritRoles(t *testing.T) { }, { Config: configWithMultiple(orgID, projectName, inheritRoleUpdated, testRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( // For Inherited Role checkExists(InheritedRoleResourceName), diff --git a/internal/service/customdnsconfigurationclusteraws/data_source_custom_dns_configuration_cluster_aws_test.go b/internal/service/customdnsconfigurationclusteraws/data_source_custom_dns_configuration_cluster_aws_test.go index 591d8c8811..13a265220c 100644 --- a/internal/service/customdnsconfigurationclusteraws/data_source_custom_dns_configuration_cluster_aws_test.go +++ b/internal/service/customdnsconfigurationclusteraws/data_source_custom_dns_configuration_cluster_aws_test.go @@ -21,7 +21,7 @@ func TestAccConfigDSCustomDNSConfigurationAWS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(orgID, projectName, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(dataSourceName), resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttr(dataSourceName, "enabled", "true"), diff --git 
a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_migration_test.go b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_migration_test.go index 4dc6ae83df..e78b164e72 100644 --- a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_migration_test.go +++ b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_migration_test.go @@ -23,7 +23,7 @@ func TestMigConfigRSCustomDNSConfigurationAWS_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), diff --git a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_test.go b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_test.go index 65580fc4cd..68a2ed2985 100644 --- a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_test.go +++ b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws_test.go @@ -26,7 +26,7 @@ func TestAccConfigRSCustomDNSConfigurationAWS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), @@ -34,7 +34,7 @@ func TestAccConfigRSCustomDNSConfigurationAWS_basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, false), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "enabled", "false"), @@ -42,7 +42,7 @@ func TestAccConfigRSCustomDNSConfigurationAWS_basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), diff --git a/internal/service/databaseuser/data_source_database_user_test.go b/internal/service/databaseuser/data_source_database_user_test.go index f85131c710..4504a1d48d 100644 --- a/internal/service/databaseuser/data_source_database_user_test.go +++ b/internal/service/databaseuser/data_source_database_user_test.go @@ -21,7 +21,7 @@ func TestAccConfigDSDatabaseUser_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(projectID, username, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(dataSourceName), resource.TestCheckResourceAttr(dataSourceName, "project_id", projectID), resource.TestCheckResourceAttr(dataSourceName, "username", username), diff --git a/internal/service/databaseuser/data_source_database_users_test.go b/internal/service/databaseuser/data_source_database_users_test.go index 4f30975910..835a6e7786 100644 --- a/internal/service/databaseuser/data_source_database_users_test.go +++ b/internal/service/databaseuser/data_source_database_users_test.go @@ -21,7 +21,7 @@ func TestAccConfigDSDatabaseUsers_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDSPlural(projectID, username, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourcePluralName, "project_id", projectID), 
resource.TestCheckResourceAttr(dataSourcePluralName, "results.#", "2"), resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.username"), diff --git a/internal/service/databaseuser/resource_database_user_migration_test.go b/internal/service/databaseuser/resource_database_user_migration_test.go index 3d1114a0d1..58c706740e 100644 --- a/internal/service/databaseuser/resource_database_user_migration_test.go +++ b/internal/service/databaseuser/resource_database_user_migration_test.go @@ -24,7 +24,7 @@ func TestMigConfigRSDatabaseUser_Basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "password", "test-acc-password"), @@ -52,7 +52,7 @@ func TestMigConfigRSDatabaseUser_withX509TypeCustomer(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "x509_type", x509Type), @@ -77,7 +77,7 @@ func TestMigConfigRSDatabaseUser_withAWSIAMType(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "aws_iam_type", "USER"), @@ -115,7 +115,7 @@ func TestMigConfigRSDatabaseUser_withLabels(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -142,7 +142,7 @@ func TestMigConfigRSDatabaseUser_withEmptyLabels(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "password", "test-acc-password"), @@ -182,7 +182,7 @@ func TestMigConfigRSDatabaseUser_withRoles(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "password", password), @@ -217,7 +217,7 @@ func TestMigConfigRSDatabaseUser_withScopes(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "password", password), @@ -246,7 +246,7 @@ func TestMigConfigRSDatabaseUser_withEmptyScopes(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "password", 
password), @@ -272,7 +272,7 @@ func TestMigConfigRSDatabaseUser_withLDAPAuthType(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), resource.TestCheckResourceAttr(resourceName, "ldap_auth_type", "USER"), diff --git a/internal/service/databaseuser/resource_database_user_test.go b/internal/service/databaseuser/resource_database_user_test.go index a4c3f7dffc..9f3c133604 100644 --- a/internal/service/databaseuser/resource_database_user_test.go +++ b/internal/service/databaseuser/resource_database_user_test.go @@ -33,7 +33,7 @@ func TestAccConfigRSDatabaseUser_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDatabaseUserBasic(projectID, username, "atlasAdmin", "First Key", "First value"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -48,7 +48,7 @@ func TestAccConfigRSDatabaseUser_basic(t *testing.T) { }, { Config: acc.ConfigDatabaseUserBasic(projectID, username, "read", "Second Key", "Second value"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -86,7 +86,7 @@ func TestAccConfigRSDatabaseUser_withX509TypeCustomer(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDatabaseUserWithX509Type(projectID, username, x509Type, "atlasAdmin", "First Key", "First value"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( 
checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -119,7 +119,7 @@ func TestAccConfigRSDatabaseUser_withX509TypeManaged(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDatabaseUserWithX509Type(projectID, username, x509Type, "atlasAdmin", "First Key", "First value"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -145,7 +145,7 @@ func TestAccConfigRSDatabaseUser_withAWSIAMType(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDatabaseUserWithAWSIAMType(projectID, username, "atlasAdmin", "First Key", "First value"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -177,7 +177,7 @@ func TestAccConfigRSDatabaseUser_withLabels(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDatabaseUserWithLabels(projectID, username, "atlasAdmin", nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -199,7 +199,7 @@ func TestAccConfigRSDatabaseUser_withLabels(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -225,7 +225,7 @@ func TestAccConfigRSDatabaseUser_withLabels(t *testing.T) { 
}, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -265,7 +265,7 @@ func TestAccConfigRSDatabaseUser_withRoles(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -285,7 +285,7 @@ func TestAccConfigRSDatabaseUser_withRoles(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -324,7 +324,7 @@ func TestAccConfigRSDatabaseUser_withScopes(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -346,7 +346,7 @@ func TestAccConfigRSDatabaseUser_withScopes(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -387,7 +387,7 @@ func TestAccConfigRSDatabaseUser_updateToEmptyScopes(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ 
-402,7 +402,7 @@ func TestAccConfigRSDatabaseUser_updateToEmptyScopes(t *testing.T) { }, { Config: acc.ConfigDatabaseUserWithScopes(projectID, username, password, "atlasAdmin", nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -439,7 +439,7 @@ func TestAccConfigRSDatabaseUser_updateToEmptyLabels(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "labels.#", "2"), @@ -451,7 +451,7 @@ func TestAccConfigRSDatabaseUser_updateToEmptyLabels(t *testing.T) { }, { Config: acc.ConfigDatabaseUserWithLabels(projectID, username, "atlasAdmin", nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "labels.#", "0"), ), @@ -473,7 +473,7 @@ func TestAccConfigRSDatabaseUser_withLDAPAuthType(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDatabaseUserWithLDAPAuthType(projectID, username, "atlasAdmin", "First Key", "First value"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -509,7 +509,7 @@ func TestAccCOnfigRSDatabaseUser_withOIDCAuthType(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigDataBaseUserWithOIDCAuthType(projectID, usernameWorkforce, workforceAuthType, "admin", "atlasAdmin"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), 
resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", usernameWorkforce), @@ -519,7 +519,7 @@ func TestAccCOnfigRSDatabaseUser_withOIDCAuthType(t *testing.T) { }, { Config: acc.ConfigDataBaseUserWithOIDCAuthType(projectID, usernameWorkload, workloadAuthType, "$external", "atlasAdmin"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "username", usernameWorkload), diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go index 7768399b65..a83c112cc6 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run_test.go @@ -25,7 +25,7 @@ func TestAccDataLakeRunDS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configRunDS(projectID, pipelineName, runID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttr(dataSourceName, "pipeline_name", pipelineName), resource.TestCheckResourceAttrSet(dataSourceName, "id"), diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go index bb89cd57d0..92d5548e20 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs_test.go @@ -24,7 +24,7 @@ func TestAccDataLakeRunDSPlural_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configRunDSPlural(projectID, pipelineName), - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttr(dataSourceName, "pipeline_name", pipelineName), resource.TestCheckResourceAttrSet(dataSourceName, "results.#"), diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go index d41d1d831f..271cb8fd37 100644 --- a/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go +++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_migration_test.go @@ -25,7 +25,7 @@ func TestMigcDataLakePipeline_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go index b83a96f5f2..81f765bc70 100644 --- a/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go +++ b/internal/service/datalakepipeline/resource_data_lake_pipeline_test.go @@ -31,7 +31,7 @@ func TestAccDataLakePipeline_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicWithPluralDS(orgID, projectName, firstClusterName, secondClusterName, firstPipelineName, secondPipelineName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", firstPipelineName), diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go 
b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go index 7fb143fbce..7261469b00 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go @@ -34,7 +34,7 @@ func TestMigEncryptionAtRest_basicAWS(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: testAccMongoDBAtlasEncryptionAtRestConfigAwsKms(projectID, &awsKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "aws_kms_config.0.enabled", "true"), @@ -86,7 +86,7 @@ func TestMigEncryptionAtRest_withRole_basicAWS(t *testing.T) { ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Config: testAccMongoDBAtlasEncryptionAtRestConfigAwsKmsWithRole(awsKms.GetRegion(), accessKeyID, secretKey, projectID, policyName, roleName, false, &awsKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "aws_kms_config.0.enabled", "true"), @@ -131,7 +131,7 @@ func TestMigEncryptionAtRest_basicAzure(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: testAccMongoDBAtlasEncryptionAtRestConfigAzureKeyVault(projectID, &azureKeyVault), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "azure_key_vault_config.0.enabled", "true"), @@ -175,7 +175,7 @@ func 
TestMigEncryptionAtRest_basicGCP(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: testAccMongoDBAtlasEncryptionAtRestConfigGoogleCloudKms(projectID, &googleCloudKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "google_cloud_kms_config.0.enabled", "true"), @@ -219,7 +219,7 @@ func TestMigEncryptionAtRest_basicAWS_from_v1_11_0(t *testing.T) { { ExternalProviders: acc.ExternalProvidersWithAWS("1.11.0"), Config: testAccMongoDBAtlasEncryptionAtRestConfigAwsKms(projectID, &awsKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "aws_kms_config.0.enabled", "true"), diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go index 12ce988a66..2b8ddbccfd 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go @@ -131,7 +131,7 @@ func TestAccEncryptionAtRest_basicAWS(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasEncryptionAtRestConfigAwsKms(projectID, &awsKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "aws_kms_config.0.enabled", "true"), @@ -144,7 +144,7 @@ func TestAccEncryptionAtRest_basicAWS(t *testing.T) { }, { Config: 
testAccMongoDBAtlasEncryptionAtRestConfigAwsKms(projectID, &awsKmsUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "aws_kms_config.0.enabled", "true"), @@ -204,7 +204,7 @@ func TestAccEncryptionAtRest_basicAzure(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasEncryptionAtRestConfigAzureKeyVault(projectID, &azureKeyVault), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "azure_key_vault_config.0.enabled", "true"), @@ -215,7 +215,7 @@ func TestAccEncryptionAtRest_basicAzure(t *testing.T) { }, { Config: testAccMongoDBAtlasEncryptionAtRestConfigAzureKeyVault(projectID, &azureKeyVaultUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "azure_key_vault_config.0.enabled", "true"), @@ -263,7 +263,7 @@ func TestAccEncryptionAtRest_basicGCP(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasEncryptionAtRestConfigGoogleCloudKms(projectID, &googleCloudKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "google_cloud_kms_config.0.enabled", "true"), @@ -271,7 +271,7 @@ func TestAccEncryptionAtRest_basicGCP(t *testing.T) { }, { Config: 
testAccMongoDBAtlasEncryptionAtRestConfigGoogleCloudKms(projectID, &googleCloudKmsUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "google_cloud_kms_config.0.enabled", "true"), @@ -316,7 +316,7 @@ func TestAccEncryptionAtRestWithRole_basicAWS(t *testing.T) { }, { Config: testAccMongoDBAtlasEncryptionAtRestConfigAwsKmsWithRole(awsKms.GetRegion(), accessKeyID, secretKey, projectID, policyName, roleName, true, &awsKms), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasEncryptionAtRestExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), diff --git a/internal/service/eventtrigger/data_source_event_trigger_test.go b/internal/service/eventtrigger/data_source_event_trigger_test.go index b86bce6d50..c8a24b384d 100644 --- a/internal/service/eventtrigger/data_source_event_trigger_test.go +++ b/internal/service/eventtrigger/data_source_event_trigger_test.go @@ -42,7 +42,7 @@ func TestAccEventTriggerDS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasDataSourceEventTriggerConfig(projectID, appID, `"INSERT", "UPDATE"`, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), ), }, diff --git a/internal/service/eventtrigger/data_source_event_triggers_test.go b/internal/service/eventtrigger/data_source_event_triggers_test.go index c5cc2e6e37..a063148583 100644 --- a/internal/service/eventtrigger/data_source_event_triggers_test.go +++ b/internal/service/eventtrigger/data_source_event_triggers_test.go @@ -43,7 +43,7 @@ func TestAccEventTriggerDSPlural_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: 
testAccMongoDBAtlasEventTriggersDataSourceConfig(projectID, appID, `"INSERT", "UPDATE"`, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), ), }, diff --git a/internal/service/eventtrigger/resource_event_trigger_test.go b/internal/service/eventtrigger/resource_event_trigger_test.go index 3a53331359..769d0d33b5 100644 --- a/internal/service/eventtrigger/resource_event_trigger_test.go +++ b/internal/service/eventtrigger/resource_event_trigger_test.go @@ -55,14 +55,14 @@ func TestAccEventTrigger_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDatabaseTrigger(projectID, appID, `"INSERT", "UPDATE"`, &event, false, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configDatabaseTrigger(projectID, appID, `"INSERT", "UPDATE", "DELETE"`, &eventUpdated, true, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -106,7 +106,7 @@ func TestAccEventTrigger_databaseNoCollection(t *testing.T) { Steps: []resource.TestStep{ { Config: configDatabaseNoCollectionTrigger(projectID, appID, `"INSERT", "UPDATE"`, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "config_database", event.Config.Database), @@ -167,14 +167,14 @@ func TestAccEventTrigger_databaseEventProccesor(t *testing.T) { Steps: []resource.TestStep{ { Config: configDatabaseEPTrigger(projectID, appID, `"INSERT", "UPDATE"`, eventBridgeAwsAccountID, eventBridgeAwsRegion, &event), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configDatabaseEPTrigger(projectID, appID, `"INSERT", "UPDATE", "DELETE"`, eventBridgeAwsAccountID, eventBridgeAwsRegion, &eventUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -226,14 +226,14 @@ func TestAccEventTrigger_authBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: configAuthenticationTrigger(projectID, appID, `"anon-user", "local-userpass"`, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configAuthenticationTrigger(projectID, appID, `"anon-user", "local-userpass", "api-key"`, &eventUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -287,14 +287,14 @@ func TestAccEventTrigger_authEventProcessor(t *testing.T) { Steps: []resource.TestStep{ { Config: configAuthenticationEPTrigger(projectID, appID, `"anon-user", "local-userpass"`, eventBridgeAwsAccountID, eventBridgeAwsRegion, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configAuthenticationEPTrigger(projectID, appID, `"anon-user", "local-userpass", "api-key"`, eventBridgeAwsAccountID, eventBridgeAwsRegion, &eventUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -343,14 
+343,14 @@ func TestAccEventTrigger_scheduleBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: configScheduleTrigger(projectID, appID, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configScheduleTrigger(projectID, appID, &eventUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -401,14 +401,14 @@ func TestAccEventTrigger_scheduleEventProcessor(t *testing.T) { Steps: []resource.TestStep{ { Config: configScheduleEPTrigger(projectID, appID, eventBridgeAwsAccountID, eventBridgeAwsRegion, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configScheduleEPTrigger(projectID, appID, eventBridgeAwsAccountID, eventBridgeAwsRegion, &eventUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), @@ -457,14 +457,14 @@ func TestAccEventTrigger_functionBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: configScheduleTrigger(projectID, appID, &event), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), }, { Config: configScheduleTrigger(projectID, appID, &eventUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), ), diff --git 
a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go index 42f6b174df..8e54b4d5f0 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go @@ -34,7 +34,7 @@ func TestAccFederatedDatabaseInstanceDS_s3Bucket(t *testing.T) { ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Config: configDSWithS3Bucket(policyName, roleName, projectName, orgID, name, testS3Bucket), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName, &federatedInstance), checkAttributes(&federatedInstance, name), resource.TestCheckResourceAttrSet(resourceName, "project_id"), diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instances_test.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instances_test.go index 5d2d7ff9c0..982cc7febe 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instances_test.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instances_test.go @@ -29,7 +29,7 @@ func TestAccFederatedDatabaseInstanceDSPlural_basic(t *testing.T) { ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Config: configDSPlural(policyName, roleName, projectName, orgID, firstName, secondName, testS3Bucket), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "results.#"), ), diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance_migration_test.go 
b/internal/service/federateddatabaseinstance/resource_federated_database_instance_migration_test.go index 6c8016bf09..ead5ad5561 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance_migration_test.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance_migration_test.go @@ -25,7 +25,7 @@ func TestMigFederatedDatabaseInstance_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: configFirstSteps(name, projectName, orgID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), resource.TestCheckResourceAttrSet(resourceName, "storage_stores.0.read_preference.0.tag_sets.#"), diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go index 5183b7dd17..7c95aa741b 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go @@ -42,11 +42,11 @@ func TestAccFederatedDatabaseInstance_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configFirstSteps(name, projectName, orgID), - Check: resource.ComposeTestCheckFunc(firstStepChecks...), + Check: resource.ComposeAggregateTestCheckFunc(firstStepChecks...), }, { Config: configFirstStepsUpdate(name, projectName, orgID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), resource.TestCheckResourceAttrSet(resourceName, "storage_stores.0.read_preference.0.tag_sets.#"), @@ -95,7 +95,7 @@ func TestAccFederatedDatabaseInstance_s3bucket(t *testing.T) { 
ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Config: configWithS3Bucket(policyName, roleName, projectName, orgID, name, testS3Bucket), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), ), @@ -128,7 +128,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Config: configWithCluster(orgID, projectName, clusterName1, clusterName2, name), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), resource.TestCheckResourceAttrSet(resourceName, "storage_stores.0.read_preference.0.tag_sets.#"), diff --git a/internal/service/federatedquerylimit/resource_federated_query_limit_test.go b/internal/service/federatedquerylimit/resource_federated_query_limit_test.go index 847476a94e..786ce2a083 100644 --- a/internal/service/federatedquerylimit/resource_federated_query_limit_test.go +++ b/internal/service/federatedquerylimit/resource_federated_query_limit_test.go @@ -44,7 +44,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Config: configBasic(policyName, roleName, projectName, orgID, tenantName, testS3Bucket), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "tenant_name"), resource.TestCheckResourceAttr(resourceName, "limit_name", limitName), diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go 
b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go index 5054e6ca49..b881efc52f 100644 --- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go +++ b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_provider_test.go @@ -21,7 +21,7 @@ func TestAccFederatedSettingsIdentityProviderDS_samlBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicDS(federatedSettingsID, idpID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "federation_settings_id"), resource.TestCheckResourceAttrSet(resourceName, "associated_orgs.#"), resource.TestCheckResourceAttrSet(resourceName, "acs_url"), diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go index 073011305f..65fdfa7a5c 100644 --- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go +++ b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go @@ -23,14 +23,14 @@ func TestAccFederatedSettingsIdentityProvidersDS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configPluralDS(federatedSettingsID, conversion.StringPtr(federatedsettingsidentityprovider.WORKFORCE), []string{oidcProtocol, samlProtocol}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "federation_settings_id"), resource.TestCheckResourceAttr(dataSourceName, "results.#", "2"), ), }, { Config: configPluralDS(federatedSettingsID, conversion.StringPtr(federatedsettingsidentityprovider.WORKFORCE), []string{samlProtocol}), - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "federation_settings_id"), resource.TestCheckResourceAttr(dataSourceName, "results.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "results.0.display_name", "SAML-test"), @@ -38,7 +38,7 @@ func TestAccFederatedSettingsIdentityProvidersDS_basic(t *testing.T) { }, { Config: configPluralDS(federatedSettingsID, conversion.StringPtr(federatedsettingsidentityprovider.WORKFORCE), []string{oidcProtocol}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "federation_settings_id"), resource.TestCheckResourceAttr(dataSourceName, "results.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "results.0.display_name", "OIDC-test"), @@ -46,7 +46,7 @@ func TestAccFederatedSettingsIdentityProvidersDS_basic(t *testing.T) { }, { Config: configPluralDS(federatedSettingsID, conversion.StringPtr(federatedsettingsidentityprovider.WORKFORCE), []string{}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "federation_settings_id"), resource.TestCheckResourceAttr(dataSourceName, "results.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "results.0.display_name", "SAML-test"), // if no protocol is specified, it defaults to SAML @@ -54,7 +54,7 @@ func TestAccFederatedSettingsIdentityProvidersDS_basic(t *testing.T) { }, { Config: configPluralDS(federatedSettingsID, conversion.StringPtr(federatedsettingsidentityprovider.WORKLOAD), []string{}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "federation_settings_id"), resource.TestCheckResourceAttr(dataSourceName, "results.#", "0"), ), diff --git 
a/internal/service/federatedsettingsidentityprovider/resource_federated_settings_identity_provider_test.go b/internal/service/federatedsettingsidentityprovider/resource_federated_settings_identity_provider_test.go index 280dd433b6..4b7b5e4647 100644 --- a/internal/service/federatedsettingsidentityprovider/resource_federated_settings_identity_provider_test.go +++ b/internal/service/federatedsettingsidentityprovider/resource_federated_settings_identity_provider_test.go @@ -73,7 +73,7 @@ func basicSAMLTestCase(tb testing.TB) *resource.TestCase { }, { Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName, idpID), resource.TestCheckResourceAttr(resourceName, "federation_settings_id", federationSettingsID), resource.TestCheckResourceAttr(resourceName, "name", "SAML-test"), @@ -125,11 +125,11 @@ func basicOIDCWorkforceTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configOIDCWorkforceBasic(federationSettingsID, associatedDomain, description1, audience1), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { Config: configOIDCWorkforceBasic(federationSettingsID, associatedDomain, description2, audience2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsManaged(resourceName), resource.TestCheckResourceAttr(resourceName, "description", description2), resource.TestCheckResourceAttr(resourceName, "audience", audience2), diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_org_test.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_org_test.go index 2a185d2f5d..2af1a50f89 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_org_test.go +++ 
b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_org_test.go @@ -22,7 +22,7 @@ func TestAccFederatedSettingsOrgDS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicDS(federatedSettingsID, orgID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "federation_settings_id"), resource.TestCheckResourceAttrSet(resourceName, "role_mappings.#"), resource.TestCheckResourceAttrSet(resourceName, "data_access_identity_provider_ids.#"), diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs_test.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs_test.go index 306d1fb029..c33691f4ea 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs_test.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs_test.go @@ -21,7 +21,7 @@ func TestAccFederatedSettingsOrgDSPlural_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicPluralDS(federatedSettingsID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "federation_settings_id"), resource.TestCheckResourceAttrSet(resourceName, "results.#"), resource.TestCheckResourceAttrSet(resourceName, "results.0.identity_provider_id"), diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_test.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_test.go index 4ba6a3fdd5..c03072e955 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_test.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_test.go @@ -25,7 +25,7 @@ func TestAccFederatedSettingsDS_basic(t *testing.T) { Steps: []resource.TestStep{ { 
Config: testAccMongoDBAtlasDataSourceFederatedSettingsConfig(orgID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasFederatedSettingsExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), diff --git a/internal/service/federatedsettingsorgconfig/resource_federated_settings_connected_org_test.go b/internal/service/federatedsettingsorgconfig/resource_federated_settings_connected_org_test.go index 62646ab537..94542639b3 100644 --- a/internal/service/federatedsettingsorgconfig/resource_federated_settings_connected_org_test.go +++ b/internal/service/federatedsettingsorgconfig/resource_federated_settings_connected_org_test.go @@ -60,7 +60,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { }, { Config: configWithIdps, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "federation_settings_id", federationSettingsID), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -72,7 +72,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { }, { Config: configDetachedIdps, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "data_access_identity_provider_ids.#", "0"), resource.TestCheckResourceAttr(resourceName, "identity_provider_id", ""), @@ -80,7 +80,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { }, { Config: configWithDomainRestriction, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "domain_restriction_enabled", "true"), resource.TestCheckResourceAttrSet(resourceName, "user_conflicts.#"), diff --git a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping_test.go 
b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping_test.go index 08d27c6540..6987f88630 100644 --- a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping_test.go +++ b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping_test.go @@ -52,11 +52,11 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(federationSettingsID, orgID, groupID, extGroupName1), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { Config: configBasic(federationSettingsID, orgID, groupID, extGroupName2), - Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttr(resourceName, "external_group_name", extGroupName2)), + Check: resource.ComposeAggregateTestCheckFunc(resource.TestCheckResourceAttr(resourceName, "external_group_name", extGroupName2)), }, { Config: configBasic(federationSettingsID, orgID, groupID, extGroupName2), diff --git a/internal/service/globalclusterconfig/data_source_global_cluster_config_test.go b/internal/service/globalclusterconfig/data_source_global_cluster_config_test.go index 23e036a9aa..fb50184c0f 100644 --- a/internal/service/globalclusterconfig/data_source_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/data_source_global_cluster_config_test.go @@ -19,7 +19,7 @@ func TestAccClusterRSGlobalClusterDS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(&clusterInfo, false, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttrSet(dataSourceName, "cluster_name"), ), diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go 
b/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go index 070a57c10b..c70697344c 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go @@ -21,7 +21,7 @@ func TestMigClusterRSGlobalCluster_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mappings.#"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index 73f45a4c48..342a354f21 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -25,7 +25,7 @@ func TestAccClusterRSGlobalCluster_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(&clusterInfo, false, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mappings.#"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), @@ -57,7 +57,7 @@ func TestAccClusterRSGlobalCluster_withAWSAndBackup(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(&clusterInfo, false, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mappings.#"), @@ -90,7 +90,7 @@ func TestAccClusterRSGlobalCluster_database(t 
*testing.T) { Steps: []resource.TestStep{ { Config: configWithDBConfig(&clusterInfo, customZone), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.#", "5"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mappings.#"), diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go index 35120ebf3d..7db034a5dd 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go @@ -40,7 +40,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithVerify(projectID, clusterName, hostname, username, password, caCertificate, cast.ToInt(port), true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "hostname", hostname), @@ -83,7 +83,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, hostname, username, password, authEnabled, cast.ToInt(port)), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "hostname", hostname), diff --git a/internal/service/ldapverify/resource_ldap_verify_test.go b/internal/service/ldapverify/resource_ldap_verify_test.go index df5c5a03fb..cc285978aa 100644 --- a/internal/service/ldapverify/resource_ldap_verify_test.go +++ b/internal/service/ldapverify/resource_ldap_verify_test.go @@ -38,7 +38,7 @@ func 
TestAccLDAPVerify_withConfiguration_CACertificate(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithConfiguration(projectID, hostname, username, password, caCertificate, cast.ToInt(port), true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "request_id"), @@ -74,7 +74,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, hostname, username, password, cast.ToInt(port)), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "request_id"), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), diff --git a/internal/service/maintenancewindow/data_source_maintenance_window_test.go b/internal/service/maintenancewindow/data_source_maintenance_window_test.go index 7188cfcc05..6d44f7377d 100644 --- a/internal/service/maintenancewindow/data_source_maintenance_window_test.go +++ b/internal/service/maintenancewindow/data_source_maintenance_window_test.go @@ -26,7 +26,7 @@ func TestAccConfigDSMaintenanceWindow_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(orgID, projectName, dayOfWeek, hourOfDay), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(dataSourceName), resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttr(dataSourceName, "day_of_week", cast.ToString(dayOfWeek)), diff --git a/internal/service/maintenancewindow/resource_maintenance_window_migration_test.go b/internal/service/maintenancewindow/resource_maintenance_window_migration_test.go index f7a1b6021f..d5da2376fc 100644 --- a/internal/service/maintenancewindow/resource_maintenance_window_migration_test.go +++ 
b/internal/service/maintenancewindow/resource_maintenance_window_migration_test.go @@ -25,7 +25,7 @@ func TestMigConfigMaintenanceWindow_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "day_of_week", cast.ToString(dayOfWeek)), diff --git a/internal/service/maintenancewindow/resource_maintenance_window_test.go b/internal/service/maintenancewindow/resource_maintenance_window_test.go index ac2ca659a3..86e090f91a 100644 --- a/internal/service/maintenancewindow/resource_maintenance_window_test.go +++ b/internal/service/maintenancewindow/resource_maintenance_window_test.go @@ -31,7 +31,7 @@ func TestAccConfigRSMaintenanceWindow_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, dayOfWeek, hourOfDay), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "day_of_week", cast.ToString(dayOfWeek)), @@ -41,7 +41,7 @@ func TestAccConfigRSMaintenanceWindow_basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, dayOfWeek, hourOfDayUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "day_of_week", cast.ToString(dayOfWeek)), @@ -51,7 +51,7 @@ func TestAccConfigRSMaintenanceWindow_basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, dayOfWeekUpdated, hourOfDay), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), 
resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "day_of_week", cast.ToString(dayOfWeekUpdated)), @@ -61,7 +61,7 @@ func TestAccConfigRSMaintenanceWindow_basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, dayOfWeek, hourOfDay), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "day_of_week", cast.ToString(dayOfWeek)), @@ -93,7 +93,7 @@ func TestAccConfigRSMaintenanceWindow_autoDeferActivated(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithAutoDeferEnabled(orgID, projectName, dayOfWeek, hourOfDay), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "day_of_week", cast.ToString(dayOfWeek)), diff --git a/internal/service/networkcontainer/resource_network_container_migration_test.go b/internal/service/networkcontainer/resource_network_container_migration_test.go index 0258e4e67a..3995d8e934 100644 --- a/internal/service/networkcontainer/resource_network_container_migration_test.go +++ b/internal/service/networkcontainer/resource_network_container_migration_test.go @@ -27,7 +27,7 @@ func TestMigNetworkContainer_basicAWS(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AWS)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AWS)...), }, mig.TestStepCheckEmptyPlan(config), }, @@ -50,7 +50,7 @@ func TestMigNetworkContainer_basicAzure(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AZURE)...), + Check: 
resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AZURE)...), }, mig.TestStepCheckEmptyPlan(config), }, @@ -73,7 +73,7 @@ func TestMigNetworkContainer_basicGCP(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(commonChecks(constant.GCP)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.GCP)...), }, mig.TestStepCheckEmptyPlan(config), }, diff --git a/internal/service/networkcontainer/resource_network_container_test.go b/internal/service/networkcontainer/resource_network_container_test.go index 11dc869fd1..6d41197a30 100644 --- a/internal/service/networkcontainer/resource_network_container_test.go +++ b/internal/service/networkcontainer/resource_network_container_test.go @@ -35,11 +35,11 @@ func TestAccNetworkContainer_basicAWS(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, cidrBlock, constant.AWS, "US_EAST_1"), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AWS)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AWS)...), }, { Config: configBasic(projectID, cidrBlockUpdated, constant.AWS, "US_EAST_2"), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AWS)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AWS)...), }, { ResourceName: resourceName, @@ -68,11 +68,11 @@ func TestAccNetworkContainer_basicAzure(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, cidrBlock, constant.AZURE, "US_EAST_2"), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AZURE)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AZURE)...), }, { Config: configBasic(projectID, cidrBlockUpdated, constant.AZURE, "US_EAST_2"), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AZURE)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AZURE)...), }, }, }) @@ -94,11 +94,11 @@ func 
TestAccNetworkContainer_basicGCP(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, gcpCidrBlock, constant.GCP, ""), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.GCP)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.GCP)...), }, { Config: configBasic(projectID, cidrBlockUpdated, constant.GCP, ""), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.GCP)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.GCP)...), }, }, }) @@ -119,7 +119,7 @@ func TestAccNetworkContainer_withRegionsGCP(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, gcpWithRegionsCidrBlock, constant.GCP, regions), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.GCP)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.GCP)...), }, }, }) @@ -144,15 +144,15 @@ func TestAccNetworkContainer_updateIndividualFields(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, cidrBlock, constant.AWS, region), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AWS)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AWS)...), }, { Config: configBasic(projectID, cidrBlockUpdated, constant.AWS, region), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AWS)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AWS)...), }, { Config: configBasic(projectID, cidrBlockUpdated, constant.AWS, regionUpdated), - Check: resource.ComposeTestCheckFunc(commonChecks(constant.AWS)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(constant.AWS)...), }, }, }) diff --git a/internal/service/networkpeering/resource_network_peering_test.go b/internal/service/networkpeering/resource_network_peering_test.go index 03c00ff158..e530d4fa20 100644 --- a/internal/service/networkpeering/resource_network_peering_test.go +++ 
b/internal/service/networkpeering/resource_network_peering_test.go @@ -43,7 +43,7 @@ func TestAccNetworkRSNetworkPeering_Azure(t *testing.T) { Steps: []resource.TestStep{ { Config: configAzure(projectID, providerName, directoryID, subscriptionID, resourceGroupName, vNetName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "container_id"), @@ -54,7 +54,7 @@ func TestAccNetworkRSNetworkPeering_Azure(t *testing.T) { }, { Config: configAzure(projectID, providerName, directoryID, subscriptionID, resourceGroupName, updatedvNetName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "container_id"), @@ -92,7 +92,7 @@ func TestAccNetworkRSNetworkPeering_GCP(t *testing.T) { Steps: []resource.TestStep{ { Config: configGCP(projectID, providerName, gcpProjectID, networkName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "container_id"), @@ -109,7 +109,7 @@ func TestAccNetworkRSNetworkPeering_GCP(t *testing.T) { }, { Config: configGCP(projectID, providerName, gcpProjectID, updatedNetworkName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "container_id"), @@ -169,7 +169,7 @@ func TestAccNetworkRSNetworkPeering_AWSDifferentRegionName(t *testing.T) { Steps: []resource.TestStep{ { Config: configAWS(orgID, projectName, providerName, vpcID, awsAccountID, vpcCIDRBlock, 
containerRegion, peerRegion), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) @@ -196,7 +196,7 @@ func basicAWSTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configAWS(orgID, projectName, providerName, vpcID, awsAccountID, vpcCIDRBlock, containerRegion, peerRegion), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { ResourceName: resourceName, diff --git a/internal/service/onlinearchive/resource_online_archive_migration_test.go b/internal/service/onlinearchive/resource_online_archive_migration_test.go index 67bf3a08ff..bce4755b2e 100644 --- a/internal/service/onlinearchive/resource_online_archive_migration_test.go +++ b/internal/service/onlinearchive/resource_online_archive_migration_test.go @@ -34,14 +34,14 @@ func TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: configFirstStep(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( populateWithSampleData(resourceName, &cluster), ), }, { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "partition_fields.0.field_name", "last_review"), ), }, diff --git a/internal/service/onlinearchive/resource_online_archive_test.go b/internal/service/onlinearchive/resource_online_archive_test.go index 1a502e7e2d..4386bb9fdf 100644 --- a/internal/service/onlinearchive/resource_online_archive_test.go +++ b/internal/service/onlinearchive/resource_online_archive_test.go @@ -38,13 +38,13 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { // We need this step to pupulate the cluster with Sample Data // The online archive won't work if the cluster does not have data 
Config: configFirstStep(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( populateWithSampleData(resourceName, &cluster), ), }, { Config: configWithDailySchedule(orgID, projectName, clusterName, 1, 7), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -60,7 +60,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { }, { Config: configWithDailySchedule(orgID, projectName, clusterName, 2, 8), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -76,7 +76,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { }, { Config: testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(orgID, projectName, clusterName, 2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -89,7 +89,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { }, { Config: testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(orgID, projectName, clusterName, 2), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), 
resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -102,7 +102,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { }, { Config: configWithoutSchedule(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -111,7 +111,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { }, { Config: configWithoutSchedule(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "partition_fields.0.field_name", "last_review"), ), }, @@ -138,13 +138,13 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { // We need this step to pupulate the cluster with Sample Data // The online archive won't work if the cluster does not have data Config: configFirstStep(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( populateWithSampleData(resourceName, &cluster), ), }, { Config: configWithoutSchedule(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -152,7 +152,7 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { }, { Config: configWithDailySchedule(orgID, projectName, clusterName, 1, 1), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), 
resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "collection_type"), @@ -189,13 +189,13 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { // We need this step to pupulate the cluster with Sample Data // The online archive won't work if the cluster does not have data Config: configFirstStep(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( populateWithSampleData(resourceName, &cluster), ), }, { Config: configWithDataProcessRegion(orgID, projectName, clusterName, cloudProvider, processRegion), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.cloud_provider", cloudProvider), resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.region", processRegion), resource.TestCheckResourceAttr(onlineArchiveDataSourceName, "data_process_region.0.cloud_provider", cloudProvider), @@ -208,7 +208,7 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { }, { Config: configWithoutSchedule(orgID, projectName, clusterName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.cloud_provider", cloudProvider), resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.region", processRegion), ), diff --git a/internal/service/organization/data_source_organization_test.go b/internal/service/organization/data_source_organization_test.go index 1aebdaebfa..482b915cb3 100644 --- a/internal/service/organization/data_source_organization_test.go +++ b/internal/service/organization/data_source_organization_test.go @@ -20,7 +20,7 @@ func TestAccConfigDSOrganization_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: 
testAccMongoDBAtlasOrganizationConfigWithDS(orgID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(datasourceName, "name"), resource.TestCheckResourceAttrSet(datasourceName, "id"), resource.TestCheckResourceAttrSet(datasourceName, "restrict_employee_access"), diff --git a/internal/service/organization/data_source_organizations_test.go b/internal/service/organization/data_source_organizations_test.go index 28043ce7bb..5cd9e3a23a 100644 --- a/internal/service/organization/data_source_organizations_test.go +++ b/internal/service/organization/data_source_organizations_test.go @@ -18,7 +18,7 @@ func TestAccConfigDSOrganizations_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasOrganizationsConfigWithDS(), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(datasourceName, "results.#"), resource.TestCheckResourceAttrSet(datasourceName, "results.0.name"), resource.TestCheckResourceAttrSet(datasourceName, "results.0.id"), @@ -40,7 +40,7 @@ func TestAccConfigDSOrganizations_withPagination(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasOrganizationsConfigWithPagination(2, 5), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(datasourceName, "results.#"), ), }, diff --git a/internal/service/organization/resource_organization_migration_test.go b/internal/service/organization/resource_organization_migration_test.go index 344f5af8c6..469b3311be 100644 --- a/internal/service/organization/resource_organization_migration_test.go +++ b/internal/service/organization/resource_organization_migration_test.go @@ -27,7 +27,7 @@ func TestMigConfigRSOrganization_Basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleName), 
- Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttrSet(resourceName, "description"), resource.TestCheckResourceAttr(resourceName, "description", description), diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index 322bcf48da..21ef373574 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -37,7 +37,7 @@ func TestAccConfigRSOrganization_Basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasOrganizationExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), @@ -48,7 +48,7 @@ func TestAccConfigRSOrganization_Basic(t *testing.T) { }, { Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, updatedName, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasOrganizationExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), @@ -110,7 +110,7 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccMongoDBAtlasOrganizationConfigWithSettings(orgOwnerID, name, description, roleName, settingsConfig), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasOrganizationExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, 
"description", description), @@ -121,7 +121,7 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { }, { Config: testAccMongoDBAtlasOrganizationConfigWithSettings(orgOwnerID, name, description, roleName, settingsConfigUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasOrganizationExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), @@ -132,7 +132,7 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { }, { Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, "org-name-updated", description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckMongoDBAtlasOrganizationExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttrSet(resourceName, "description"), diff --git a/internal/service/orginvitation/data_source_org_invitation_test.go b/internal/service/orginvitation/data_source_org_invitation_test.go index eee4b19c17..34ef8f67bb 100644 --- a/internal/service/orginvitation/data_source_org_invitation_test.go +++ b/internal/service/orginvitation/data_source_org_invitation_test.go @@ -25,7 +25,7 @@ func TestAccConfigDSOrgInvitation_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(orgID, name, initialRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "invitation_id"), resource.TestCheckResourceAttr(dataSourceName, "org_id", orgID), resource.TestCheckResourceAttr(dataSourceName, "username", name), diff --git a/internal/service/orginvitation/resource_org_invitation_migration_test.go b/internal/service/orginvitation/resource_org_invitation_migration_test.go index 2c9a2d44c1..8d4c995e49 100644 --- 
a/internal/service/orginvitation/resource_org_invitation_migration_test.go +++ b/internal/service/orginvitation/resource_org_invitation_migration_test.go @@ -25,7 +25,7 @@ func TestMigConfigOrgInvitation_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "invitation_id"), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), diff --git a/internal/service/orginvitation/resource_org_invitation_test.go b/internal/service/orginvitation/resource_org_invitation_test.go index 313da5e312..5a2c4446a4 100644 --- a/internal/service/orginvitation/resource_org_invitation_test.go +++ b/internal/service/orginvitation/resource_org_invitation_test.go @@ -29,7 +29,7 @@ func TestAccConfigRSOrgInvitation_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, name, initialRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "invitation_id"), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -40,7 +40,7 @@ func TestAccConfigRSOrgInvitation_basic(t *testing.T) { }, { Config: configBasic(orgID, name, updateRoles), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "invitation_id"), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go index ccaf15227a..bc89b6732f 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go +++ 
b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go @@ -48,7 +48,7 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithDependencies(resourceSuffix, projectID, false, dependencies), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), checkModeClustersUpToDate(projectID, clusterName, clusterResourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -58,7 +58,7 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { }, { Config: configWithDependencies(resourceSuffix, projectID, true, dependencies), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), checkModeClustersUpToDate(projectID, clusterName, clusterResourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -87,7 +87,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "enabled", "false"), @@ -98,7 +98,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { }, { Config: configBasic(orgID, projectName, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), diff --git a/internal/service/privatelinkendpoint/data_source_privatelink_endpoint_test.go b/internal/service/privatelinkendpoint/data_source_privatelink_endpoint_test.go index 3b344f1cf8..e4838d9ab5 100644 --- 
a/internal/service/privatelinkendpoint/data_source_privatelink_endpoint_test.go +++ b/internal/service/privatelinkendpoint/data_source_privatelink_endpoint_test.go @@ -23,7 +23,7 @@ func TestAccNetworkDSPrivateLinkEndpoint_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configDS(projectID, providerName, region), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "private_link_id"), diff --git a/internal/service/privatelinkendpoint/resource_privatelink_endpoint_migration_test.go b/internal/service/privatelinkendpoint/resource_privatelink_endpoint_migration_test.go index 5aaed4681d..97ad27497e 100644 --- a/internal/service/privatelinkendpoint/resource_privatelink_endpoint_migration_test.go +++ b/internal/service/privatelinkendpoint/resource_privatelink_endpoint_migration_test.go @@ -26,7 +26,7 @@ func TestMigNetworkPrivateLinkEndpoint_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "provider_name", providerName), diff --git a/internal/service/privatelinkendpoint/resource_privatelink_endpoint_test.go b/internal/service/privatelinkendpoint/resource_privatelink_endpoint_test.go index d809ce6105..c260939d7b 100644 --- a/internal/service/privatelinkendpoint/resource_privatelink_endpoint_test.go +++ b/internal/service/privatelinkendpoint/resource_privatelink_endpoint_test.go @@ -28,7 +28,7 @@ func TestAccNetworkRSPrivateLinkEndpointAWS_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, providerName, region), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "provider_name"), @@ -63,7 +63,7 @@ func TestAccNetworkRSPrivateLinkEndpointAzure_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, providerName, region), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "provider_name"), @@ -98,7 +98,7 @@ func TestAccNetworkRSPrivateLinkEndpointGCP_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, providerName, region), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "provider_name"), diff --git a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless_test.go b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless_test.go index 9cbf362214..0043a1f58f 100644 --- a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless_test.go +++ b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless_test.go @@ -34,7 +34,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, instanceName, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "instance_name", instanceName), ), diff --git a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service_test.go 
b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service_test.go index 4a5162a421..ae0e822a77 100644 --- a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service_test.go +++ b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service_test.go @@ -50,7 +50,7 @@ func basicAWSTestCase(tb testing.TB) *resource.TestCase { Config: configCompleteAWS( awsAccessKey, awsSecretKey, projectID, providerName, region, vpcID, subnetID, securityGroupID, resourceSuffix, ), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { ResourceName: resourceName, diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go index c1f5c70fc1..882efe2c18 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archive_test.go @@ -27,7 +27,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchiveDS_basic Steps: []resource.TestStep{ { Config: dataSourcesConfigBasic(projectID, endpointID, customerEndpointDNSName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchive, "project_id", projectID), resource.TestCheckResourceAttr(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchive, "endpoint_id", endpointID), diff --git 
a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go index 6c2433308c..86c54d6faa 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives_test.go @@ -26,7 +26,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchivesDSPlura Steps: []resource.TestStep{ { Config: dataSourceConfigBasic(projectID, endpointID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchives, "project_id", projectID), resource.TestCheckResourceAttrSet(dataSourcePrivatelinkEndpointServiceDataFederetionDataArchives, "results.#"), diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go index 98484a9a0c..cc8c60728a 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_migration_test.go @@ -23,7 +23,7 @@ func TestMigNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_basic(t { 
ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID), diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go index fc09aba865..14c9516ec6 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive_test.go @@ -31,7 +31,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_basic(t Steps: []resource.TestStep{ { Config: resourceConfigBasic(projectID, endpointID, comment), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID), @@ -63,7 +63,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_updateC Steps: []resource.TestStep{ { Config: resourceConfigBasic(projectID, endpointID, comment), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID), @@ -74,7 +74,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_updateC }, { Config: 
resourceConfigBasic(projectID, endpointID, commentUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID), @@ -85,7 +85,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_updateC }, { Config: resourceConfigBasic(projectID, endpointID, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID), @@ -110,7 +110,7 @@ func TestAccNetworkPrivatelinkEndpointServiceDataFederationOnlineArchive_basicWi Steps: []resource.TestStep{ { Config: resourceConfigBasicWithRegionDNSName(projectID, endpointID, comment, customerEndpointDNSName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), resource.TestCheckResourceAttr(resourceName, "endpoint_id", endpointID), diff --git a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_migration_test.go b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_migration_test.go index 3401010a15..6070ae8a34 100644 --- a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_migration_test.go +++ b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_migration_test.go @@ -27,7 +27,7 @@ func TestMigServerlessPrivateLinkEndpointService_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "provider_name", "AWS"), resource.TestCheckResourceAttr(resourceName, "comment", commentOrigin), @@ -57,7 +57,7 @@ func TestMigServerlessPrivateLinkEndpointService_AWSVPC(t *testing.T) { { ExternalProviders: mig.ExternalProvidersWithAWS(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "provider_name", "AWS"), ), diff --git a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_test.go b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_test.go index 730ce95258..d238d1f790 100644 --- a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_test.go +++ b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless_test.go @@ -31,7 +31,7 @@ func TestAccServerlessPrivateLinkEndpointService_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, instanceName, commentOrigin), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "provider_name", "AWS"), resource.TestCheckResourceAttr(resourceName, "comment", commentOrigin), @@ -43,7 +43,7 @@ func TestAccServerlessPrivateLinkEndpointService_basic(t *testing.T) { }, { Config: configBasic(projectID, instanceName, commentUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "provider_name", "AWS"), resource.TestCheckResourceAttr(resourceName, "comment", commentUpdated), @@ -83,7 +83,7 @@ func 
TestAccServerlessPrivateLinkEndpointService_AWSEndpointCommentUpdate(t *tes Steps: []resource.TestStep{ { Config: configAWSEndpoint(projectID, instanceName, awsAccessKey, awsSecretKey, false, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "provider_name", "AWS"), resource.TestCheckResourceAttrSet(datasourceEndpointsName, "project_id"), @@ -93,7 +93,7 @@ func TestAccServerlessPrivateLinkEndpointService_AWSEndpointCommentUpdate(t *tes }, { Config: configAWSEndpoint(projectID, instanceName, awsAccessKey, awsSecretKey, true, commentUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "provider_name", "AWS"), resource.TestCheckResourceAttr(resourceName, "comment", commentUpdated), diff --git a/internal/service/project/resource_project_migration_test.go b/internal/service/project/resource_project_migration_test.go index c4c4d25f5d..66b8aea30e 100644 --- a/internal/service/project/resource_project_migration_test.go +++ b/internal/service/project/resource_project_migration_test.go @@ -28,7 +28,7 @@ func TestMigProject_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "org_id", orgID), ), }, @@ -67,7 +67,7 @@ func TestMigProject_withTeams(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -94,7 +94,7 @@ func TestMigProject_withFalseDefaultSettings(t *testing.T) { { ExternalProviders: 
mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -128,7 +128,7 @@ func TestMigProject_withLimits(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "limits.0.name", "atlas.project.deployment.clusters"), @@ -156,14 +156,14 @@ func TestMigGovProject_regionUsageRestrictionsDefault(t *testing.T) { { ExternalProviders: acc.ExternalProviders("1.15.3"), Config: configGovSimple(orgID, projectName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsGov(resourceName), ), }, { ExternalProviders: acc.ExternalProviders("1.16.0"), Config: configGovSimple(orgID, projectName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsGov(resourceName), ), ExpectError: regexp.MustCompile("Provider produced inconsistent result after apply"), diff --git a/internal/service/project/resource_project_test.go b/internal/service/project/resource_project_test.go index 8ed4463755..139873b29d 100644 --- a/internal/service/project/resource_project_test.go +++ b/internal/service/project/resource_project_test.go @@ -551,7 +551,7 @@ func TestAccProject_basic(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { Config: configBasic(orgID, projectName, projectOwnerID, false, @@ -570,7 +570,7 @@ func TestAccProject_basic(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -592,7 +592,7 @@ func TestAccProject_basic(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -625,7 +625,7 @@ func TestAccGovProject_withProjectOwner(t *testing.T) { Steps: []resource.TestStep{ { Config: configGovWithOwner(orgID, projectName, projectOwnerID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExistsGov(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -651,7 +651,7 @@ func TestAccProject_withFalseDefaultSettings(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithFalseDefaultSettings(orgID, projectName, projectOwnerID), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -675,7 +675,7 @@ func TestAccProject_withUpdatedSettings(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), @@ -691,7 +691,7 @@ func TestAccProject_withUpdatedSettings(t *testing.T) { }, { Config: acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, true), - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "with_default_alerts_settings", "true"), resource.TestCheckResourceAttr(resourceName, "is_collect_database_specifics_statistics_enabled", "true"), @@ -704,7 +704,7 @@ func TestAccProject_withUpdatedSettings(t *testing.T) { }, { Config: acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "with_default_alerts_settings", "false"), resource.TestCheckResourceAttr(resourceName, "is_collect_database_specifics_statistics_enabled", "false"), @@ -734,7 +734,7 @@ func TestAccProject_withUpdatedRole(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithUpdatedRole(orgID, projectName, acc.GetProjectTeamsIDsWithPos(0), roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cluster_count", clusterCount), @@ -742,7 +742,7 @@ func TestAccProject_withUpdatedRole(t *testing.T) { }, { Config: configWithUpdatedRole(orgID, projectName, acc.GetProjectTeamsIDsWithPos(0), roleNameUpdated), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "cluster_count", clusterCount), @@ -771,7 +771,7 @@ func TestAccProject_updatedToEmptyRoles(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "teams.#", 
"1"), resource.TestCheckResourceAttr(resourceName, "teams.0.team_id", acc.GetProjectTeamsIDsWithPos(0)), @@ -782,7 +782,7 @@ func TestAccProject_updatedToEmptyRoles(t *testing.T) { }, { Config: configBasic(orgID, projectName, "", false, nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "teams.#", "0"), ), @@ -829,7 +829,7 @@ func TestAccProject_withUpdatedLimits(t *testing.T) { Value: 1, }, }), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { Config: configWithLimits(orgID, projectName, []*admin.DataFederationLimit{ @@ -838,7 +838,7 @@ func TestAccProject_withUpdatedLimits(t *testing.T) { Value: 2, }, }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "limits.0.name", "atlas.project.deployment.nodesPerPrivateLinkRegion"), @@ -860,7 +860,7 @@ func TestAccProject_withUpdatedLimits(t *testing.T) { Value: 30, }, }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckTypeSetElemNestedAttrs( @@ -910,7 +910,7 @@ func TestAccProject_updatedToEmptyLimits(t *testing.T) { Value: 1, }, }), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "limits.#", "1"), resource.TestCheckResourceAttr(resourceName, "limits.0.name", "atlas.project.deployment.clusters"), resource.TestCheckResourceAttr(resourceName, "limits.0.value", "1"), @@ -918,7 +918,7 @@ func TestAccProject_updatedToEmptyLimits(t *testing.T) { }, { Config: 
configWithLimits(orgID, projectName, nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "limits.#", "0"), ), }, @@ -963,7 +963,7 @@ func TestAccProject_withInvalidLimitNameOnUpdate(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithLimits(orgID, projectName, []*admin.DataFederationLimit{}), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), ), @@ -1011,7 +1011,7 @@ func TestAccProject_withTags(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithTags(orgID, projectName, nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "name", projectName), resource.TestCheckResourceAttr(resourceName, "org_id", orgID), resource.TestCheckResourceAttr(resourceName, "tags.#", "0"), @@ -1028,7 +1028,7 @@ func TestAccProject_withTags(t *testing.T) { }, { Config: configWithTags(orgID, projectName, tagsEmpty), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "tags.#", "0"), resource.TestCheckResourceAttr(dataSourceNameByID, "tags.#", "0"), ), diff --git a/internal/service/projectapikey/resource_project_api_key_test.go b/internal/service/projectapikey/resource_project_api_key_test.go index e2f8a18d11..8654cfcce7 100644 --- a/internal/service/projectapikey/resource_project_api_key_test.go +++ b/internal/service/projectapikey/resource_project_api_key_test.go @@ -39,7 +39,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, 
"description", description), resource.TestCheckResourceAttrSet(resourceName, "public_key"), resource.TestCheckResourceAttr(resourceName, "project_assignment.#", "1"), @@ -70,7 +70,7 @@ func TestAccProjectAPIKey_changingSingleProject(t *testing.T) { Steps: []resource.TestStep{ { Config: configChangingProject(orgID, projectName2, description, fmt.Sprintf("%q", projectID1)), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttrSet(resourceName, "public_key"), resource.TestCheckResourceAttr(resourceName, "project_assignment.#", "1"), @@ -78,7 +78,7 @@ func TestAccProjectAPIKey_changingSingleProject(t *testing.T) { }, { Config: configChangingProject(orgID, projectName2, description, "mongodbatlas_project.proj2.id"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttrSet(resourceName, "public_key"), resource.TestCheckResourceAttr(resourceName, "project_assignment.#", "1"), @@ -103,7 +103,7 @@ func TestAccProjectAPIKey_multiple(t *testing.T) { Steps: []resource.TestStep{ { Config: configMultiple(projectID, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "description"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttrSet(resourceName, "project_assignment.0.project_id"), @@ -134,14 +134,14 @@ func TestAccProjectAPIKey_updateDescription(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, description, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "description"), resource.TestCheckResourceAttr(resourceName, 
"description", description), ), }, { Config: configBasic(projectID, updatedDescription, roleName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "description"), resource.TestCheckResourceAttr(resourceName, "description", updatedDescription), ), @@ -167,7 +167,7 @@ func TestAccProjectAPIKey_recreateWhenDeletedExternally(t *testing.T) { Steps: []resource.TestStep{ { Config: projectAPIKeyConfig, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "description"), ), }, @@ -200,14 +200,14 @@ func TestAccProjectAPIKey_deleteProjectAndAssignment(t *testing.T) { Steps: []resource.TestStep{ { Config: configDeletedProjectAndAssignment(orgID, projectID1, projectName2, description, true), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_assignment.0.project_id"), resource.TestCheckResourceAttrSet(resourceName, "project_assignment.1.project_id"), ), }, { Config: configDeletedProjectAndAssignment(orgID, projectID1, projectName2, description, false), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_assignment.0.project_id"), ), }, diff --git a/internal/service/projectinvitation/data_source_project_invitation_test.go b/internal/service/projectinvitation/data_source_project_invitation_test.go index 7e08420b82..edf967dfd9 100644 --- a/internal/service/projectinvitation/data_source_project_invitation_test.go +++ b/internal/service/projectinvitation/data_source_project_invitation_test.go @@ -26,7 +26,7 @@ func TestAccProjectDSProjectInvitation_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDataSourceMongoDBAtlasProjectInvitationConfig(orgID, projectName, name, initialRole), - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "project_id"), resource.TestCheckResourceAttrSet(dataSourceName, "username"), resource.TestCheckResourceAttrSet(dataSourceName, "invitation_id"), diff --git a/internal/service/projectinvitation/resource_project_invitation_migration_test.go b/internal/service/projectinvitation/resource_project_invitation_migration_test.go index 3feadfad2a..2e94f92b24 100644 --- a/internal/service/projectinvitation/resource_project_invitation_migration_test.go +++ b/internal/service/projectinvitation/resource_project_invitation_migration_test.go @@ -26,7 +26,7 @@ func TestMigProjectInvitation_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: configBasic(orgID, projectName, name, roles), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "invitation_id"), diff --git a/internal/service/projectinvitation/resource_project_invitation_test.go b/internal/service/projectinvitation/resource_project_invitation_test.go index d9a8c04328..ea16af94b2 100644 --- a/internal/service/projectinvitation/resource_project_invitation_test.go +++ b/internal/service/projectinvitation/resource_project_invitation_test.go @@ -30,7 +30,7 @@ func TestAccProjectRSProjectInvitation_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, projectName, name, initialRole), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "invitation_id"), @@ -41,7 +41,7 @@ func TestAccProjectRSProjectInvitation_basic(t *testing.T) { }, { Config: configBasic(orgID, projectName, name, updateRoles), - Check: 
resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "invitation_id"), diff --git a/internal/service/projectipaccesslist/resource_project_ip_access_list_migration_test.go b/internal/service/projectipaccesslist/resource_project_ip_access_list_migration_test.go index 6ea5b910aa..fbe39659b2 100644 --- a/internal/service/projectipaccesslist/resource_project_ip_access_list_migration_test.go +++ b/internal/service/projectipaccesslist/resource_project_ip_access_list_migration_test.go @@ -25,7 +25,7 @@ func TestMigProjectIPAccessList_settingIPAddress(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(commonChecks(ipAddress, "", "", comment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(ipAddress, "", "", comment)...), }, mig.TestStepCheckEmptyPlan(config), }, @@ -47,7 +47,7 @@ func TestMigProjectIPAccessList_settingCIDRBlock(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(commonChecks("", cidrBlock, "", comment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks("", cidrBlock, "", comment)...), }, mig.TestStepCheckEmptyPlan(config), }, @@ -75,7 +75,7 @@ func TestMigProjectIPAccessList_settingAWSSecurityGroup(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(commonChecks("", "", awsSGroup, comment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks("", "", awsSGroup, comment)...), }, mig.TestStepCheckEmptyPlan(config), }, diff --git a/internal/service/projectipaccesslist/resource_project_ip_access_list_test.go b/internal/service/projectipaccesslist/resource_project_ip_access_list_test.go index 855281a0e4..b75d5f6f1f 100644 --- 
a/internal/service/projectipaccesslist/resource_project_ip_access_list_test.go +++ b/internal/service/projectipaccesslist/resource_project_ip_access_list_test.go @@ -35,11 +35,11 @@ func TestAccProjectIPAccesslist_settingIPAddress(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithIPAddress(projectID, ipAddress, comment), - Check: resource.ComposeTestCheckFunc(commonChecks(ipAddress, "", "", comment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(ipAddress, "", "", comment)...), }, { Config: configWithIPAddress(projectID, updatedIPAddress, updatedComment), - Check: resource.ComposeTestCheckFunc(commonChecks(updatedIPAddress, "", "", updatedComment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(updatedIPAddress, "", "", updatedComment)...), }, { ResourceName: resourceName, @@ -67,11 +67,11 @@ func TestAccProjectIPAccessList_settingCIDRBlock(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithCIDRBlock(projectID, cidrBlock, comment), - Check: resource.ComposeTestCheckFunc(commonChecks("", cidrBlock, "", comment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks("", cidrBlock, "", comment)...), }, { Config: configWithCIDRBlock(projectID, updatedCIDRBlock, updatedComment), - Check: resource.ComposeTestCheckFunc(commonChecks("", updatedCIDRBlock, "", updatedComment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks("", updatedCIDRBlock, "", updatedComment)...), }, }, }) @@ -98,11 +98,11 @@ func TestAccProjectIPAccessList_settingAWSSecurityGroup(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithAWSSecurityGroup(projectID, providerName, vpcID, awsAccountID, vpcCIDRBlock, awsRegion, awsSGroup, comment), - Check: resource.ComposeTestCheckFunc(commonChecks("", "", awsSGroup, comment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks("", "", awsSGroup, comment)...), }, { Config: configWithAWSSecurityGroup(projectID, providerName, vpcID, awsAccountID, 
vpcCIDRBlock, awsRegion, updatedAWSSgroup, updatedComment), - Check: resource.ComposeTestCheckFunc(commonChecks("", "", updatedAWSSgroup, updatedComment)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks("", "", updatedAWSSgroup, updatedComment)...), }, }, }) @@ -144,11 +144,11 @@ func TestAccProjectIPAccessList_settingMultiple(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithMultiple(projectID, accessList, false), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { Config: configWithMultiple(projectID, accessList, true), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) diff --git a/internal/service/pushbasedlogexport/resource_test.go b/internal/service/pushbasedlogexport/resource_test.go index 3e25bbe489..a74f237922 100644 --- a/internal/service/pushbasedlogexport/resource_test.go +++ b/internal/service/pushbasedlogexport/resource_test.go @@ -45,11 +45,11 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, s3BucketName1, s3BucketName2, s3BucketPolicyName, awsIAMRoleName, awsIAMRolePolicyName, nonEmptyPrefixPath, true), - Check: resource.ComposeTestCheckFunc(commonChecks(s3BucketName1, nonEmptyPrefixPath)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(s3BucketName1, nonEmptyPrefixPath)...), }, { Config: configBasicUpdated(projectID, s3BucketName1, s3BucketName2, s3BucketPolicyName, awsIAMRoleName, awsIAMRolePolicyName, nonEmptyPrefixPath, true), - Check: resource.ComposeTestCheckFunc(commonChecks(s3BucketName2, nonEmptyPrefixPath)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(s3BucketName2, nonEmptyPrefixPath)...), }, { Config: configBasicUpdated(projectID, s3BucketName1, s3BucketName2, s3BucketPolicyName, awsIAMRoleName, awsIAMRolePolicyName, nonEmptyPrefixPath, true), @@ -87,7 +87,7 @@ func 
noPrefixPathTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, s3BucketName1, s3BucketName2, s3BucketPolicyName, awsIAMRoleName, awsIAMRolePolicyName, defaultPrefixPath, false), - Check: resource.ComposeTestCheckFunc(commonChecks(s3BucketName1, defaultPrefixPath)...), + Check: resource.ComposeAggregateTestCheckFunc(commonChecks(s3BucketName1, defaultPrefixPath)...), }, }, } diff --git a/internal/service/searchdeployment/resource_search_deployment_migration_test.go b/internal/service/searchdeployment/resource_search_deployment_migration_test.go index f08c0ed778..a01ccbe89a 100644 --- a/internal/service/searchdeployment/resource_search_deployment_migration_test.go +++ b/internal/service/searchdeployment/resource_search_deployment_migration_test.go @@ -27,7 +27,7 @@ func TestMigSearchDeployment_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc(searchNodeChecks(resourceName, clusterName, instanceSize, searchNodeCount)...), + Check: resource.ComposeAggregateTestCheckFunc(searchNodeChecks(resourceName, clusterName, instanceSize, searchNodeCount)...), }, mig.TestStepCheckEmptyPlan(config), }, diff --git a/internal/service/searchdeployment/resource_search_deployment_test.go b/internal/service/searchdeployment/resource_search_deployment_test.go index c6538a2b7e..8e1ad8da5a 100644 --- a/internal/service/searchdeployment/resource_search_deployment_test.go +++ b/internal/service/searchdeployment/resource_search_deployment_test.go @@ -41,7 +41,7 @@ func newSearchNodeTestStep(resourceName, orgID, projectName, clusterName, instan dataSourceChecks := searchNodeChecks(fmt.Sprintf("data.%s", resourceName), clusterName, instanceSize, searchNodeCount) return resource.TestStep{ Config: configBasic(orgID, projectName, clusterName, instanceSize, searchNodeCount), - Check: resource.ComposeTestCheckFunc(append(resourceChecks, dataSourceChecks...)...), + Check: 
resource.ComposeAggregateTestCheckFunc(append(resourceChecks, dataSourceChecks...)...), } } diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go index 3de5f30d99..dc0cc3c0ac 100644 --- a/internal/service/searchindex/resource_search_index_test.go +++ b/internal/service/searchindex/resource_search_index_test.go @@ -31,7 +31,7 @@ func TestAccSearchIndex_withSearchType(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, indexName, databaseName, clusterName, true), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) @@ -54,7 +54,7 @@ func TestAccSearchIndex_withMapping(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithMapping(projectID, indexName, databaseName, clusterName), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) @@ -84,7 +84,7 @@ func TestAccSearchIndex_withSynonyms(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, with), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) @@ -113,11 +113,11 @@ func TestAccSearchIndex_updatedToEmptySynonyms(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, with), - Check: resource.ComposeTestCheckFunc(checks1...), + Check: resource.ComposeAggregateTestCheckFunc(checks1...), }, { Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, without), - Check: resource.ComposeTestCheckFunc(checks2...), + Check: resource.ComposeAggregateTestCheckFunc(checks2...), }, }, }) @@ -136,14 +136,14 @@ func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) { Steps: []resource.TestStep{ { Config: configAdditional(projectID, indexName, databaseName, 
clusterName, analyzersTF), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrWith(resourceName, "analyzers", acc.JSONEquals(analyzersJSON)), ), }, { Config: configAdditional(projectID, indexName, databaseName, clusterName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "analyzers", ""), ), @@ -165,14 +165,14 @@ func TestAccSearchIndex_updatedToEmptyMappingsFields(t *testing.T) { Steps: []resource.TestStep{ { Config: configAdditional(projectID, indexName, databaseName, clusterName, mappingsFieldsTF), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrWith(resourceName, "mappings_fields", acc.JSONEquals(mappingsFieldsJSON)), ), }, { Config: configAdditional(projectID, indexName, databaseName, clusterName, ""), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "mappings_fields", ""), ), @@ -203,7 +203,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasic(projectID, indexName, databaseName, clusterName, false), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { Config: configBasic(projectID, indexName, databaseName, clusterName, false), @@ -243,7 +243,7 @@ func basicVectorTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configVector(projectID, indexName, databaseName, clusterName), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, } diff --git a/internal/service/serverlessinstance/resource_serverless_instance_test.go 
b/internal/service/serverlessinstance/resource_serverless_instance_test.go index ee9d0c62f0..d6b3fd9447 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance_test.go +++ b/internal/service/serverlessinstance/resource_serverless_instance_test.go @@ -34,7 +34,7 @@ func TestAccServerlessInstance_withTags(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigServerlessInstance(projectID, instanceName, false, nil, nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", instanceName), resource.TestCheckResourceAttr(resourceName, "tags.#", "0"), @@ -54,7 +54,7 @@ func TestAccServerlessInstance_withTags(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", instanceName), resource.TestCheckResourceAttr(resourceName, "tags.#", "2"), @@ -76,7 +76,7 @@ func TestAccServerlessInstance_withTags(t *testing.T) { }, }, ), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "name", instanceName), resource.TestCheckResourceAttr(resourceName, "tags.#", "1"), @@ -103,7 +103,7 @@ func TestAccServerlessInstance_autoIndexing(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.ConfigServerlessInstance(projectID, instanceName, false, conversion.Pointer(false), nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "auto_indexing", "false"), resource.TestCheckResourceAttr(dataSourceName, "auto_indexing", "false"), @@ -112,7 +112,7 @@ func TestAccServerlessInstance_autoIndexing(t *testing.T) { }, { Config: acc.ConfigServerlessInstance(projectID, instanceName, false, 
conversion.Pointer(true), nil), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "auto_indexing", "true"), resource.TestCheckResourceAttr(dataSourceName, "auto_indexing", "true"), @@ -137,7 +137,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: acc.ConfigServerlessInstance(projectID, instanceName, true, nil, nil), - Check: resource.ComposeTestCheckFunc(basicChecks(projectID, instanceName)...), + Check: resource.ComposeAggregateTestCheckFunc(basicChecks(projectID, instanceName)...), }, { ResourceName: resourceName, diff --git a/internal/service/streamconnection/data_source_stream_connections_test.go b/internal/service/streamconnection/data_source_stream_connections_test.go index f9a16c85a4..cdf5c80875 100644 --- a/internal/service/streamconnection/data_source_stream_connections_test.go +++ b/internal/service/streamconnection/data_source_stream_connections_test.go @@ -84,5 +84,5 @@ func streamConnectionsAttributeChecks(resourceName string, pageNum, itemsPerPage if itemsPerPage != nil { resourceChecks = append(resourceChecks, resource.TestCheckResourceAttr(resourceName, "items_per_page", fmt.Sprint(*itemsPerPage))) } - return resource.ComposeTestCheckFunc(resourceChecks...) + return resource.ComposeAggregateTestCheckFunc(resourceChecks...) 
} diff --git a/internal/service/streamconnection/resource_stream_connection_test.go b/internal/service/streamconnection/resource_stream_connection_test.go index bee389a194..f65dce618b 100644 --- a/internal/service/streamconnection/resource_stream_connection_test.go +++ b/internal/service/streamconnection/resource_stream_connection_test.go @@ -182,7 +182,7 @@ func sampleStreamConnectionAttributeChecks( resource.TestCheckResourceAttr(resourceName, "connection_name", sampleName), resource.TestCheckResourceAttr(resourceName, "type", "Sample"), } - return resource.ComposeTestCheckFunc(resourceChecks...) + return resource.ComposeAggregateTestCheckFunc(resourceChecks...) } func kafkaStreamConnectionAttributeChecks( @@ -209,7 +209,7 @@ func kafkaStreamConnectionAttributeChecks( resource.TestCheckResourceAttrSet(resourceName, "security.broker_public_certificate"), ) } - return resource.ComposeTestCheckFunc(resourceChecks...) + return resource.ComposeAggregateTestCheckFunc(resourceChecks...) } func clusterStreamConnectionConfig(projectID, instanceName, clusterName string) string { @@ -248,7 +248,7 @@ func clusterStreamConnectionAttributeChecks(resourceName, clusterName string) re resource.TestCheckResourceAttr(resourceName, "db_role_to_execute.role", "atlasAdmin"), resource.TestCheckResourceAttr(resourceName, "db_role_to_execute.type", "BUILT_IN"), } - return resource.ComposeTestCheckFunc(resourceChecks...) + return resource.ComposeAggregateTestCheckFunc(resourceChecks...) 
} func checkStreamConnectionImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { diff --git a/internal/service/streaminstance/data_source_stream_instance_test.go b/internal/service/streaminstance/data_source_stream_instance_test.go index 4124d3e1d0..1a0f77b94e 100644 --- a/internal/service/streaminstance/data_source_stream_instance_test.go +++ b/internal/service/streaminstance/data_source_stream_instance_test.go @@ -22,7 +22,7 @@ func TestAccStreamDSStreamInstance_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: streamInstanceDataSourceConfig(projectID, instanceName, region, cloudProvider), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( streamInstanceAttributeChecks(dataSourceName, instanceName, region, cloudProvider), resource.TestCheckResourceAttr(dataSourceName, "stream_config.tier", "SP30"), ), diff --git a/internal/service/streaminstance/data_source_stream_instances_test.go b/internal/service/streaminstance/data_source_stream_instances_test.go index 6c7177373e..ac4cc04717 100644 --- a/internal/service/streaminstance/data_source_stream_instances_test.go +++ b/internal/service/streaminstance/data_source_stream_instances_test.go @@ -30,7 +30,7 @@ func TestAccStreamDSStreamInstances_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: streamInstancesDataSourceConfig(projectID, instanceName, region, cloudProvider), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) @@ -54,7 +54,7 @@ func TestAccStreamDSStreamInstances_withPageConfig(t *testing.T) { Steps: []resource.TestStep{ { Config: streamInstancesWithPageAttrDataSourceConfig(projectID, instanceName, region, cloudProvider, pageNumber), - Check: resource.ComposeTestCheckFunc(checks...), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, }, }) diff --git a/internal/service/streaminstance/resource_stream_instance_test.go 
b/internal/service/streaminstance/resource_stream_instance_test.go index 8fe48d8132..caedf3662b 100644 --- a/internal/service/streaminstance/resource_stream_instance_test.go +++ b/internal/service/streaminstance/resource_stream_instance_test.go @@ -23,7 +23,7 @@ func TestAccStreamRSStreamInstance_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.StreamInstanceConfig(projectID, instanceName, region, cloudProvider), // as of now there are no values that can be updated because only one region is supported - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( streamInstanceAttributeChecks(resourceName, instanceName, region, cloudProvider), resource.TestCheckResourceAttr(resourceName, "stream_config.tier", "SP30"), ), @@ -51,7 +51,7 @@ func TestAccStreamRSStreamInstance_withStreamConfig(t *testing.T) { Steps: []resource.TestStep{ { Config: acc.StreamInstanceWithStreamConfigConfig(projectID, instanceName, region, cloudProvider, "SP10"), // as of now there are no values that can be updated because only one region is supported - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( streamInstanceAttributeChecks(resourceName, instanceName, region, cloudProvider), resource.TestCheckResourceAttr(resourceName, "stream_config.tier", "SP10"), ), @@ -76,7 +76,7 @@ func streamInstanceAttributeChecks(resourceName, instanceName, region, cloudProv resource.TestCheckResourceAttr(resourceName, "data_process_region.cloud_provider", cloudProvider), resource.TestCheckResourceAttr(resourceName, "hostnames.#", "1"), } - return resource.ComposeTestCheckFunc(resourceChecks...) + return resource.ComposeAggregateTestCheckFunc(resourceChecks...) 
} func checkStreamInstanceImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { diff --git a/internal/service/team/data_source_team_test.go b/internal/service/team/data_source_team_test.go index a8e4528bcd..0eac53e4a5 100644 --- a/internal/service/team/data_source_team_test.go +++ b/internal/service/team/data_source_team_test.go @@ -24,7 +24,7 @@ func TestAccConfigDSTeam_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: dataSourceConfigBasic(orgID, name, username), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "org_id"), resource.TestCheckResourceAttrSet(dataSourceName, "team_id"), resource.TestCheckResourceAttr(dataSourceName, "name", name), @@ -50,7 +50,7 @@ func TestAccConfigDSTeamByName_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: dataSourceConfigBasicByName(orgID, name, username), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(dataSourceName, "org_id"), resource.TestCheckResourceAttrSet(dataSourceName, "team_id"), resource.TestCheckResourceAttr(dataSourceName, "name", name), diff --git a/internal/service/team/resource_team_migration_test.go b/internal/service/team/resource_team_migration_test.go index 5df34be50f..b1bb03d412 100644 --- a/internal/service/team/resource_team_migration_test.go +++ b/internal/service/team/resource_team_migration_test.go @@ -25,7 +25,7 @@ func TestMigConfigTeams_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "name", name), diff --git a/internal/service/team/resource_team_test.go b/internal/service/team/resource_team_test.go index 4a9acb58a4..5353292a8b 100644 --- 
a/internal/service/team/resource_team_test.go +++ b/internal/service/team/resource_team_test.go @@ -30,7 +30,7 @@ func TestAccConfigRSTeam_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(orgID, name, usernames), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "name", name), @@ -39,7 +39,7 @@ func TestAccConfigRSTeam_basic(t *testing.T) { }, { Config: configBasic(orgID, updatedName, usernames), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "name", updatedName), @@ -48,7 +48,7 @@ func TestAccConfigRSTeam_basic(t *testing.T) { }, { Config: configBasic(orgID, updatedName, usernames), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "name", updatedName), @@ -81,7 +81,7 @@ func TestAccConfigRSTeam_legacyName(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasicLegacyNames(orgID, name, usernames), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "name", name), diff --git a/internal/service/thirdpartyintegration/resource_third_party_integration_test.go b/internal/service/thirdpartyintegration/resource_third_party_integration_test.go index 729c00cadb..a4f9f78105 100644 --- a/internal/service/thirdpartyintegration/resource_third_party_integration_test.go +++ b/internal/service/thirdpartyintegration/resource_third_party_integration_test.go @@ -64,7 +64,7 @@ func 
basicPagerDutyTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configBasicPagerDuty(projectID, serviceKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "service_key", serviceKey), @@ -77,7 +77,7 @@ func basicPagerDutyTest(tb testing.TB) *resource.TestCase { }, { Config: configBasicPagerDuty(projectID, updatedServiceKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "service_key", updatedServiceKey), @@ -103,7 +103,7 @@ func opsGenieTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configOpsGenie(projectID, apiKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "api_key", apiKey), @@ -114,7 +114,7 @@ func opsGenieTest(tb testing.TB) *resource.TestCase { }, { Config: configOpsGenie(projectID, updatedAPIKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "api_key", updatedAPIKey), @@ -141,7 +141,7 @@ func victorOpsTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configVictorOps(projectID, apiKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "api_key", apiKey), @@ -152,7 +152,7 @@ func victorOpsTest(tb 
testing.TB) *resource.TestCase { }, { Config: configVictorOps(projectID, updatedAPIKey), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "api_key", updatedAPIKey), @@ -179,7 +179,7 @@ func datadogTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configDatadog(projectID, apiKey, "US"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "api_key", apiKey), @@ -190,7 +190,7 @@ func datadogTest(tb testing.TB) *resource.TestCase { }, { Config: configDatadog(projectID, updatedAPIKey, "US"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "api_key", updatedAPIKey), @@ -220,7 +220,7 @@ func prometheusTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configPrometheus(projectID, username, password, serviceDiscovery, scheme), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "user_name", username), @@ -234,7 +234,7 @@ func prometheusTest(tb testing.TB) *resource.TestCase { }, { Config: configPrometheus(projectID, updatedUsername, password, serviceDiscovery, scheme), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "user_name", updatedUsername), @@ -266,7 +266,7 
@@ func microsoftTeamsTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configMicrosoftTeams(projectID, url), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "microsoft_teams_webhook_url", url), @@ -275,7 +275,7 @@ func microsoftTeamsTest(tb testing.TB) *resource.TestCase { }, { Config: configMicrosoftTeams(projectID, updatedURL), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "microsoft_teams_webhook_url", updatedURL), @@ -301,7 +301,7 @@ func webhookTest(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configWebhook(projectID, url), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "url", url), @@ -310,7 +310,7 @@ func webhookTest(tb testing.TB) *resource.TestCase { }, { Config: configWebhook(projectID, updatedURL), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "url", updatedURL), diff --git a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_migration_test.go b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_migration_test.go index eb327e5ca8..1364a57c8d 100644 --- a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_migration_test.go +++ 
b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_migration_test.go @@ -24,7 +24,7 @@ func TestMigGenericX509AuthDBUser_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -51,7 +51,7 @@ func TestMigGenericX509AuthDBUser_withCustomerX509(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "customer_x509_cas"), diff --git a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_test.go b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_test.go index 3abf786728..18bf930f0c 100644 --- a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_test.go +++ b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user_test.go @@ -33,7 +33,7 @@ func TestAccGenericX509AuthDBUser_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: configBasic(projectID, username), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "username", username), @@ -64,7 +64,7 @@ func TestAccGenericX509AuthDBUser_withCustomerX509(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithCustomerX509(orgID, projectName, cas), - Check: resource.ComposeTestCheckFunc( + Check: 
resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "customer_x509_cas"), @@ -94,7 +94,7 @@ func TestAccGenericX509AuthDBUser_withDatabaseUser(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithDatabaseUser(projectID, username, months), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttrSet(resourceName, "username"), From 27ca92a1406afccf8bd5b1d5d347fb0262d8cf58 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Mon, 1 Jul 2024 15:33:01 +0200 Subject: [PATCH 07/84] chore: Updates asdf to TF 1.9.0 and compatibility matrix body (#2376) * update asdf to TF 1.9.0 * update compatibility message * Update .github/workflows/update_tf_compatibility_matrix.yml Co-authored-by: maastha <122359335+maastha@users.noreply.github.com> * Fix actionlint --------- Co-authored-by: maastha <122359335+maastha@users.noreply.github.com> --- .github/workflows/update_tf_compatibility_matrix.yml | 2 +- .tool-versions | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update_tf_compatibility_matrix.yml b/.github/workflows/update_tf_compatibility_matrix.yml index a58fcd74d9..7af1303001 100644 --- a/.github/workflows/update_tf_compatibility_matrix.yml +++ b/.github/workflows/update_tf_compatibility_matrix.yml @@ -30,4 +30,4 @@ jobs: commit-message: "doc: Updates Terraform Compatibility Matrix documentation" delete-branch: true branch: terraform-compatibility-matrix-update - body: Automatic updates for Terraform Compatibility Matrix documentation + body: "Automatic updates for Terraform Compatibility Matrix documentation. **Action Required**: Update .tools-version file and TF_VERSION_LATEST GitHub environment variable if needed." 
diff --git a/.tool-versions b/.tool-versions index 50fac1e3eb..4277997d39 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ golang 1.22.4 -terraform 1.8.5 +terraform 1.9.0 From ac3f1fde3a80a42855766139221cb6e7c2f38cfb Mon Sep 17 00:00:00 2001 From: Andrea Angiolillo Date: Mon, 1 Jul 2024 15:47:06 +0200 Subject: [PATCH 08/84] fix: stale.yaml gh action (#2379) --- .github/workflows/stale.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index c60444a7c8..d6e4cb9a04 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,9 +10,8 @@ on: jobs: stale: runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write + env: + GITHUB_TOKEN: ${{ secrets.APIX_BOT_PAT }} steps: - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e id: stale From 31d438109dfd796a07743de0c9f46f2fdfa1853f Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Tue, 2 Jul 2024 20:58:59 +0100 Subject: [PATCH 09/84] doc: Updates alert-config examples (#2378) * doc: Update alert-config examples * doc: Removes other references to GROUP_CHARTS_ADMIN * chore: align table --- .../docs/d/alert_configuration.html.markdown | 2 +- .../docs/r/alert_configuration.html.markdown | 21 +++++++++---------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/website/docs/d/alert_configuration.html.markdown b/website/docs/d/alert_configuration.html.markdown index 4011d5805a..5672d173a2 100644 --- a/website/docs/d/alert_configuration.html.markdown +++ b/website/docs/d/alert_configuration.html.markdown @@ -65,7 +65,7 @@ resource "mongodbatlas_alert_configuration" "test" { delay_min = 0 sms_enabled = false email_enabled = true - roles = ["GROUP_CHARTS_ADMIN", "GROUP_CLUSTER_MANAGER"] + roles = ["GROUP_CLUSTER_MANAGER"] } matcher { diff --git a/website/docs/r/alert_configuration.html.markdown b/website/docs/r/alert_configuration.html.markdown index a59246878c..7dc7373cf8 
100644 --- a/website/docs/r/alert_configuration.html.markdown +++ b/website/docs/r/alert_configuration.html.markdown @@ -26,7 +26,7 @@ resource "mongodbatlas_alert_configuration" "test" { delay_min = 0 sms_enabled = false email_enabled = true - roles = ["GROUP_CHARTS_ADMIN", "GROUP_CLUSTER_MANAGER"] + roles = ["GROUP_CLUSTER_MANAGER"] } matcher { @@ -60,13 +60,13 @@ resource "mongodbatlas_alert_configuration" "test" { delay_min = 0 sms_enabled = false email_enabled = true - roles = ["GROUP_CHARTS_ADMIN", "GROUP_CLUSTER_MANAGER"] + roles = ["GROUP_CLUSTER_MANAGER"] } matcher { - field_name = "HOSTNAME_AND_PORT" + field_name = "CLUSTER_NAME" operator = "EQUALS" - value = "SECONDARY" + value = "my-cluster" } threshold_config { @@ -257,12 +257,11 @@ List of notifications to send when an alert condition is detected. | Project roles | Organization roles | |:---------- |:----------- | - | `GROUP_CHARTS_ADMIN` | `ORG_OWNER` | - | `GROUP_CLUSTER_MANAGER` | `ORG_MEMBER` | - | `GROUP_DATA_ACCESS_ADMIN` | `ORG_GROUP_CREATOR` | - | `GROUP_DATA_ACCESS_READ_ONLY` | `ORG_BILLING_ADMIN` | - | `GROUP_DATA_ACCESS_READ_WRITE` | `ORG_READ_ONLY` | - | `GROUP_OWNER` | | + | `GROUP_CLUSTER_MANAGER` | `ORG_OWNER` | + | `GROUP_DATA_ACCESS_ADMIN` | `ORG_MEMBER` | + | `GROUP_DATA_ACCESS_READ_ONLY` | `ORG_GROUP_CREATOR` | + | `GROUP_DATA_ACCESS_READ_WRITE` | `ORG_BILLING_ADMIN` | + | `GROUP_OWNER` | `ORG_READ_ONLY` | | `GROUP_READ_ONLY` | | ## Attributes Reference @@ -280,7 +279,7 @@ In addition to all arguments above, the following attributes are exported: Alert Configuration can be imported using the `project_id-alert_configuration_id`, e.g. ``` -$ terraform import mongodbatlas_alert_configuration.test 5d0f1f74cf09a29120e123cd-5d0f1f74cf09a29120e1fscg +terraform import mongodbatlas_alert_configuration.test 5d0f1f74cf09a29120e123cd-5d0f1f74cf09a29120e1fscg ``` **NOTE**: Third-party notifications will not contain their respective credentials as these are sensitive attributes. 
If you wish to perform updates on these notifications without providing the original credentials, the corresponding `notifier_id` attribute must be provided instead. From f2078c505493c22a818fc22b2a051f3401720acd Mon Sep 17 00:00:00 2001 From: svc-apix-Bot <142542575+svc-apix-Bot@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:43:23 +0200 Subject: [PATCH 10/84] chore: Updates Atlas Go SDK (#2380) * build(deps): bump go.mongodb.org/atlas-sdk * rename DiskBackupSnapshotAWSExportBucket to DiskBackupSnapshotExportBucket * add param to DeleteAtlasSearchDeployment * add LatestDefinition * more LatestDefinition and start using SearchIndexCreateRequest * HasElementsSliceOrMap * update * ToAnySlicePointer * fix update --------- Co-authored-by: lantoli <430982+lantoli@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- internal/common/conversion/collections.go | 24 +++++ .../common/conversion/collections_test.go | 63 +++++++++++ internal/common/conversion/flatten_expand.go | 2 +- internal/config/client.go | 2 +- .../data_source_accesslist_api_keys.go | 2 +- .../resource_access_list_api_key.go | 2 +- .../data_source_advanced_clusters.go | 2 +- .../advancedcluster/model_advanced_cluster.go | 2 +- .../model_advanced_cluster_test.go | 4 +- .../resource_advanced_cluster.go | 2 +- .../resource_advanced_cluster_test.go | 2 +- .../data_source_alert_configuration.go | 2 +- .../data_source_alert_configurations.go | 2 +- .../model_alert_configuration.go | 2 +- .../model_alert_configuration_test.go | 2 +- .../resource_alert_configuration.go | 2 +- .../service/apikey/data_source_api_keys.go | 2 +- internal/service/apikey/resource_api_key.go | 2 +- .../atlasuser/data_source_atlas_user.go | 2 +- .../atlasuser/data_source_atlas_user_test.go | 2 +- .../atlasuser/data_source_atlas_users.go | 2 +- .../atlasuser/data_source_atlas_users_test.go | 2 +- .../service/auditing/resource_auditing.go | 2 +- .../resource_backup_compliance_policy.go | 2 +- .../resource_cloud_backup_schedule.go | 2 
+- ...ce_cloud_backup_schedule_migration_test.go | 2 +- .../resource_cloud_backup_schedule_test.go | 2 +- .../data_source_cloud_backup_snapshots.go | 2 +- .../model_cloud_backup_snapshot.go | 2 +- .../model_cloud_backup_snapshot_test.go | 2 +- .../resource_cloud_backup_snapshot.go | 2 +- ...ce_cloud_backup_snapshot_export_buckets.go | 4 +- ...rce_cloud_backup_snapshot_export_bucket.go | 4 +- ...urce_cloud_backup_snapshot_restore_jobs.go | 2 +- ...ource_cloud_backup_snapshot_restore_job.go | 2 +- ...rce_cloud_provider_access_authorization.go | 2 +- .../resource_cloud_provider_access_setup.go | 2 +- .../resource_cluster_outage_simulation.go | 2 +- .../service/controlplaneipaddresses/model.go | 2 +- .../controlplaneipaddresses/model_test.go | 2 +- .../data_source_custom_db_roles.go | 2 +- .../customdbrole/resource_custom_db_role.go | 2 +- ...ce_custom_dns_configuration_cluster_aws.go | 2 +- .../databaseuser/model_database_user.go | 2 +- .../databaseuser/model_database_user_test.go | 2 +- .../resource_database_user_migration_test.go | 2 +- .../resource_database_user_test.go | 2 +- .../data_source_data_lake_pipeline_run.go | 2 +- .../data_source_data_lake_pipeline_runs.go | 2 +- .../data_source_data_lake_pipelines.go | 2 +- .../resource_data_lake_pipeline.go | 2 +- .../model_encryption_at_rest.go | 2 +- .../model_encryption_at_rest_test.go | 2 +- .../resource_encryption_at_rest.go | 2 +- ...ource_encryption_at_rest_migration_test.go | 2 +- .../resource_encryption_at_rest_test.go | 4 +- ...source_federated_database_instance_test.go | 2 +- ...ata_source_federated_database_instances.go | 2 +- .../resource_federated_database_instance.go | 2 +- .../data_source_federated_query_limits.go | 2 +- .../resource_federated_query_limit.go | 2 +- ...e_federated_settings_identity_providers.go | 2 +- ...el_federated_settings_identity_provider.go | 2 +- ...derated_settings_identity_provider_test.go | 2 +- .../data_source_federated_settings.go | 2 +- 
...ource_federated_settings_connected_orgs.go | 2 +- ...model_federated_settings_connected_orgs.go | 2 +- ...ce_federated_settings_org_role_mappings.go | 2 +- ...del_federated_settings_org_role_mapping.go | 2 +- ...rce_federated_settings_org_role_mapping.go | 2 +- .../resource_global_cluster_config.go | 2 +- .../resource_ldap_configuration.go | 2 +- .../ldapverify/resource_ldap_verify.go | 2 +- .../resource_maintenance_window.go | 2 +- .../data_source_network_containers.go | 2 +- .../resource_network_container.go | 2 +- .../data_source_network_peering.go | 2 +- .../data_source_network_peerings.go | 2 +- .../resource_network_peering.go | 2 +- .../onlinearchive/resource_online_archive.go | 2 +- .../organization/data_source_organizations.go | 2 +- .../organization/resource_organization.go | 2 +- .../resource_organization_test.go | 2 +- .../orginvitation/resource_org_invitation.go | 2 +- ...resource_private_endpoint_regional_mode.go | 2 +- .../resource_privatelink_endpoint.go | 2 +- ...esource_privatelink_endpoint_serverless.go | 2 +- .../resource_privatelink_endpoint_service.go | 2 +- ...service_data_federation_online_archives.go | 2 +- ..._service_data_federation_online_archive.go | 2 +- ...rivatelink_endpoints_service_serverless.go | 2 +- ...privatelink_endpoint_service_serverless.go | 2 +- .../service/project/data_source_project.go | 2 +- .../service/project/data_source_projects.go | 2 +- internal/service/project/model_project.go | 2 +- .../service/project/model_project_test.go | 2 +- internal/service/project/resource_project.go | 2 +- .../resource_project_migration_test.go | 2 +- .../service/project/resource_project_test.go | 4 +- .../projectapikey/resource_project_api_key.go | 2 +- .../resource_project_invitation.go | 2 +- .../model_project_ip_access_list.go | 2 +- .../model_project_ip_access_list_test.go | 2 +- .../resource_project_ip_access_list.go | 2 +- internal/service/pushbasedlogexport/model.go | 2 +- .../service/pushbasedlogexport/model_test.go | 2 +- 
.../service/pushbasedlogexport/resource.go | 2 +- .../pushbasedlogexport/state_transition.go | 2 +- .../state_transition_test.go | 4 +- .../model_search_deployment.go | 2 +- .../model_search_deployment_test.go | 2 +- .../resource_search_deployment.go | 2 +- .../state_transition_search_deployment.go | 2 +- ...state_transition_search_deployment_test.go | 4 +- .../searchindex/data_source_search_index.go | 18 ++-- .../searchindex/data_source_search_indexes.go | 23 ++-- .../searchindex/resource_search_index.go | 100 +++++++++--------- .../data_source_serverless_instances.go | 2 +- .../resource_serverless_instance.go | 2 +- .../resource_serverless_instance_test.go | 2 +- ...a_source_cloud_shared_tier_restore_jobs.go | 2 +- .../data_source_shared_tier_snapshots.go | 2 +- .../data_source_stream_connections.go | 2 +- .../data_source_stream_connections_test.go | 2 +- .../model_stream_connection.go | 2 +- .../model_stream_connection_test.go | 2 +- .../data_source_stream_instances.go | 2 +- .../data_source_stream_instances_test.go | 2 +- .../streaminstance/model_stream_instance.go | 2 +- .../model_stream_instance_test.go | 2 +- internal/service/team/data_source_team.go | 2 +- internal/service/team/resource_team.go | 2 +- .../data_source_third_party_integrations.go | 2 +- ...ource_x509_authentication_database_user.go | 2 +- internal/testutil/acc/atlas.go | 2 +- internal/testutil/acc/database_user.go | 2 +- internal/testutil/acc/factory.go | 2 +- internal/testutil/acc/project.go | 2 +- internal/testutil/acc/serverless.go | 2 +- 141 files changed, 302 insertions(+), 214 deletions(-) create mode 100644 internal/common/conversion/collections.go create mode 100644 internal/common/conversion/collections_test.go diff --git a/go.mod b/go.mod index a2f490822d..88567f43e9 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.14.4 go.mongodb.org/atlas v0.36.0 - go.mongodb.org/atlas-sdk/v20231115014 v20231115014.0.0 + 
go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0 go.mongodb.org/realm v0.1.0 ) diff --git a/go.sum b/go.sum index a73b683deb..c42a947457 100644 --- a/go.sum +++ b/go.sum @@ -780,8 +780,8 @@ github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgK go.mongodb.org/atlas v0.12.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M= go.mongodb.org/atlas v0.36.0 h1:m05S3AO7zkl+bcG1qaNsEKBnAqnKx2FDwLooHpIG3j4= go.mongodb.org/atlas v0.36.0/go.mod h1:nfPldE9dSama6G2IbIzmEza02Ly7yFZjMMVscaM0uEc= -go.mongodb.org/atlas-sdk/v20231115014 v20231115014.0.0 h1:hN7x3m6THf03q/tE48up1j0U/26lJmx+s1LXB/qvHHc= -go.mongodb.org/atlas-sdk/v20231115014 v20231115014.0.0/go.mod h1:pCl46YnWOIde8lq27whXDwUseNeUvtAy3vy5ZDeTcBA= +go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0 h1:D+e3bpRwa9WH3HHs8bLjOdjTp1vdlp83ZYithzGbaQ8= +go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0/go.mod h1:seuG5HpfG20/8FhJGyWi4yL7hqAcmq7pf/G0gipNOyM= go.mongodb.org/realm v0.1.0 h1:zJiXyLaZrznQ+Pz947ziSrDKUep39DO4SfA0Fzx8M4M= go.mongodb.org/realm v0.1.0/go.mod h1:4Vj6iy+Puo1TDERcoh4XZ+pjtwbOzPpzqy3Cwe8ZmDM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= diff --git a/internal/common/conversion/collections.go b/internal/common/conversion/collections.go new file mode 100644 index 0000000000..c4c14b07e8 --- /dev/null +++ b/internal/common/conversion/collections.go @@ -0,0 +1,24 @@ +package conversion + +import "reflect" + +// HasElementsSliceOrMap checks if param is a non-empty slice or map +func HasElementsSliceOrMap(value any) bool { + v := reflect.ValueOf(value) + if v.Kind() == reflect.Slice || v.Kind() == reflect.Map { + return v.Len() > 0 + } + return false +} + +// ToAnySlicePointer converts to a slice pointer of any as needed in some Atlas SDK Go structs +func ToAnySlicePointer(value *[]map[string]any) *[]any { + if value == nil { + return nil + } + ret := make([]any, len(*value)) + for i, item := range *value { + ret[i] = item + } + return 
&ret +} diff --git a/internal/common/conversion/collections_test.go b/internal/common/conversion/collections_test.go new file mode 100644 index 0000000000..8132997aa3 --- /dev/null +++ b/internal/common/conversion/collections_test.go @@ -0,0 +1,63 @@ +package conversion_test + +import ( + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/stretchr/testify/assert" +) + +func TestHasElementsSliceOrMap(t *testing.T) { + testCasesTrue := map[string]any{ + "slice": []string{"hi"}, + "map": map[string]string{"hi": "there"}, + "int int map": map[int]int{1: 2}, + "double map": map[string]map[string]string{ + "hi": {"there": "bye"}, + }, + } + testCasesFalse := map[string]any{ + "nil": nil, + "empty slice": []string{}, + "empty map": map[string]string{}, + "empty int int map": map[int]int{}, + "not a collection but with len": "hello", + "random object": 123, + } + for name, value := range testCasesTrue { + t.Run(name, func(t *testing.T) { + assert.True(t, conversion.HasElementsSliceOrMap(value)) + }) + } + for name, value := range testCasesFalse { + t.Run(name, func(t *testing.T) { + assert.False(t, conversion.HasElementsSliceOrMap(value)) + }) + } +} + +func TestToAnySlicePointer(t *testing.T) { + testCases := map[string]*[]map[string]any{ + "nil": nil, + "empty": {}, + "one element": {{"hi": "there"}}, + "more complex": { + {"hi": "there"}, + {"bye": 1234}, + }, + } + for name, value := range testCases { + t.Run(name, func(t *testing.T) { + ret := conversion.ToAnySlicePointer(value) + if ret == nil { + assert.Nil(t, value) + } else { + assert.NotNil(t, ret) + assert.Equal(t, len(*value), len(*ret)) + for i := range *value { + assert.Equal(t, (*value)[i], (*ret)[i]) + } + } + }) + } +} diff --git a/internal/common/conversion/flatten_expand.go b/internal/common/conversion/flatten_expand.go index 6608020a3a..229934db0e 100644 --- a/internal/common/conversion/flatten_expand.go +++ 
b/internal/common/conversion/flatten_expand.go @@ -3,7 +3,7 @@ package conversion import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func FlattenLinks(links []admin.Link) []map[string]string { diff --git a/internal/config/client.go b/internal/config/client.go index d87b0574ce..f9b5f93e02 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" matlasClient "go.mongodb.org/atlas/mongodbatlas" realmAuth "go.mongodb.org/realm/auth" "go.mongodb.org/realm/realm" diff --git a/internal/service/accesslistapikey/data_source_accesslist_api_keys.go b/internal/service/accesslistapikey/data_source_accesslist_api_keys.go index 73b33e4096..ccc34007c2 100644 --- a/internal/service/accesslistapikey/data_source_accesslist_api_keys.go +++ b/internal/service/accesslistapikey/data_source_accesslist_api_keys.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/accesslistapikey/resource_access_list_api_key.go b/internal/service/accesslistapikey/resource_access_list_api_key.go index ce645e9f55..1eaf6751f5 100644 --- a/internal/service/accesslistapikey/resource_access_list_api_key.go +++ b/internal/service/accesslistapikey/resource_access_list_api_key.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - 
"go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/advancedcluster/data_source_advanced_clusters.go b/internal/service/advancedcluster/data_source_advanced_clusters.go index 867b0b5cdd..ed6736d947 100644 --- a/internal/service/advancedcluster/data_source_advanced_clusters.go +++ b/internal/service/advancedcluster/data_source_advanced_clusters.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 9cb280e575..0bd011c764 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var ( diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go b/internal/service/advancedcluster/model_advanced_cluster_test.go index a63e0e648b..7598e8511b 100644 --- a/internal/service/advancedcluster/model_advanced_cluster_test.go +++ b/internal/service/advancedcluster/model_advanced_cluster_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20231115014/admin" - 
"go.mongodb.org/atlas-sdk/v20231115014/mockadmin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" ) var ( diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 9a1b68f110..2bb01aac6e 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -21,7 +21,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 8f56acd0ab..80823e1735 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/alertconfiguration/data_source_alert_configuration.go b/internal/service/alertconfiguration/data_source_alert_configuration.go index 16c07c7aac..2aebfb60c1 100644 --- a/internal/service/alertconfiguration/data_source_alert_configuration.go +++ b/internal/service/alertconfiguration/data_source_alert_configuration.go @@ -14,7 +14,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" 
"github.com/zclconf/go-cty/cty" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var _ datasource.DataSource = &alertConfigurationDS{} diff --git a/internal/service/alertconfiguration/data_source_alert_configurations.go b/internal/service/alertconfiguration/data_source_alert_configurations.go index f4b2790037..6b178aae06 100644 --- a/internal/service/alertconfiguration/data_source_alert_configurations.go +++ b/internal/service/alertconfiguration/data_source_alert_configurations.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const alertConfigurationsDataSourceName = "alert_configurations" diff --git a/internal/service/alertconfiguration/model_alert_configuration.go b/internal/service/alertconfiguration/model_alert_configuration.go index b0f2fb1daa..ef42960051 100644 --- a/internal/service/alertconfiguration/model_alert_configuration.go +++ b/internal/service/alertconfiguration/model_alert_configuration.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewNotificationList(list []TfNotificationModel) (*[]admin.AlertsNotificationRootForGroup, error) { diff --git a/internal/service/alertconfiguration/model_alert_configuration_test.go b/internal/service/alertconfiguration/model_alert_configuration_test.go index 6c0ff81cfe..7fa162fd7d 100644 --- a/internal/service/alertconfiguration/model_alert_configuration_test.go +++ b/internal/service/alertconfiguration/model_alert_configuration_test.go @@ -7,7 +7,7 @@ import ( 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/alertconfiguration" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/alertconfiguration/resource_alert_configuration.go b/internal/service/alertconfiguration/resource_alert_configuration.go index b15bc85e7e..29b72498f1 100644 --- a/internal/service/alertconfiguration/resource_alert_configuration.go +++ b/internal/service/alertconfiguration/resource_alert_configuration.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/apikey/data_source_api_keys.go b/internal/service/apikey/data_source_api_keys.go index 35426f3e44..85ef1db062 100644 --- a/internal/service/apikey/data_source_api_keys.go +++ b/internal/service/apikey/data_source_api_keys.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/apikey/resource_api_key.go b/internal/service/apikey/resource_api_key.go index e0fdc75674..2bbd1449c9 100644 --- a/internal/service/apikey/resource_api_key.go +++ b/internal/service/apikey/resource_api_key.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/atlasuser/data_source_atlas_user.go b/internal/service/atlasuser/data_source_atlas_user.go index 06df30c7dc..5bae40ac96 100644 --- a/internal/service/atlasuser/data_source_atlas_user.go +++ b/internal/service/atlasuser/data_source_atlas_user.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/atlasuser/data_source_atlas_user_test.go b/internal/service/atlasuser/data_source_atlas_user_test.go index bb5a8a676f..42d0a594e4 100644 --- a/internal/service/atlasuser/data_source_atlas_user_test.go +++ b/internal/service/atlasuser/data_source_atlas_user_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestAccConfigDSAtlasUser_ByUserID(t *testing.T) { diff --git a/internal/service/atlasuser/data_source_atlas_users.go b/internal/service/atlasuser/data_source_atlas_users.go index 88a8a46737..70f6973475 100644 --- a/internal/service/atlasuser/data_source_atlas_users.go +++ b/internal/service/atlasuser/data_source_atlas_users.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/atlasuser/data_source_atlas_users_test.go b/internal/service/atlasuser/data_source_atlas_users_test.go index 405ee22c19..29f926b319 100644 --- a/internal/service/atlasuser/data_source_atlas_users_test.go +++ b/internal/service/atlasuser/data_source_atlas_users_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/atlasuser" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestAccConfigDSAtlasUsers_ByOrgID(t *testing.T) { diff --git a/internal/service/auditing/resource_auditing.go b/internal/service/auditing/resource_auditing.go index 41670d827f..bd4024eee5 100644 --- a/internal/service/auditing/resource_auditing.go +++ b/internal/service/auditing/resource_auditing.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go index cec407fc6c..b542c87056 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go @@ -8,7 +8,7 @@ import ( "net/http" "strings" - 
"go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index 9a77376a19..19a1e70d05 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index a584571e79..2caacc8108 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -7,7 +7,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 9bb7898bb5..3d0bb0bf60 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ 
b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var ( diff --git a/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go b/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go index c1e2fa3eed..c5cc844e62 100644 --- a/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go +++ b/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go index 00ea9cdc57..6c3539b16f 100644 --- a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go +++ b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go @@ -4,7 +4,7 @@ import ( "errors" "regexp" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func SplitSnapshotImportID(id string) (*admin.GetReplicaSetBackupApiParams, error) { diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go index 59e3a91dc3..2279919f71 100644 --- 
a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go @@ -5,7 +5,7 @@ import ( "github.com/go-test/deep" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupsnapshot" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestSplitSnapshotImportID(t *testing.T) { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go index c2e8e8f8f1..beb904109f 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go @@ -14,7 +14,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cluster" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go index 8ab51bbb2f..8b1b93feef 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go +++ b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { @@ 
-83,7 +83,7 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return nil } -func flattenBuckets(buckets []admin.DiskBackupSnapshotAWSExportBucket) []map[string]any { +func flattenBuckets(buckets []admin.DiskBackupSnapshotExportBucket) []map[string]any { var results []map[string]any if len(buckets) == 0 { diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go index 5db931461a..0da3e4a58f 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { @@ -72,7 +72,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return diag.Errorf("atlas only supports AWS") } - request := &admin.DiskBackupSnapshotAWSExportBucket{ + request := &admin.DiskBackupSnapshotExportBucket{ IamRoleId: conversion.StringPtr(d.Get("iam_role_id").(string)), BucketName: conversion.StringPtr(d.Get("bucket_name").(string)), CloudProvider: &cloudProvider, diff --git a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go index c2490cc147..78d743e3ab 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go +++ b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go index 2ae6240ba1..682e36a27f 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go @@ -13,7 +13,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git 
a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go index 866225fe31..43c2c06a53 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) /* diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go index f1fa66be18..d796f48bdc 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go @@ -6,7 +6,7 @@ import ( "net/http" "regexp" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go index fff252de53..284d1d04c2 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - 
"go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/controlplaneipaddresses/model.go b/internal/service/controlplaneipaddresses/model.go index a440f66308..a99a367c56 100644 --- a/internal/service/controlplaneipaddresses/model.go +++ b/internal/service/controlplaneipaddresses/model.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewTFControlPlaneIPAddresses(ctx context.Context, apiResp *admin.ControlPlaneIPAddresses) (*TFControlPlaneIpAddressesModel, diag.Diagnostics) { diff --git a/internal/service/controlplaneipaddresses/model_test.go b/internal/service/controlplaneipaddresses/model_test.go index 9d7f9b6fb3..c550719e7f 100644 --- a/internal/service/controlplaneipaddresses/model_test.go +++ b/internal/service/controlplaneipaddresses/model_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/controlplaneipaddresses" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type sdkToTFModelTestCase struct { diff --git a/internal/service/customdbrole/data_source_custom_db_roles.go b/internal/service/customdbrole/data_source_custom_db_roles.go index c127f071d7..a46c8f9542 100644 --- a/internal/service/customdbrole/data_source_custom_db_roles.go +++ b/internal/service/customdbrole/data_source_custom_db_roles.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + 
"go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/customdbrole/resource_custom_db_role.go b/internal/service/customdbrole/resource_custom_db_role.go index 206d4de1f5..1ba4bab266 100644 --- a/internal/service/customdbrole/resource_custom_db_role.go +++ b/internal/service/customdbrole/resource_custom_db_role.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go index ba6894ce4c..5ce4f48c4e 100644 --- a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go +++ b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/databaseuser/model_database_user.go b/internal/service/databaseuser/model_database_user.go index d1cd75cdcc..113f31f4e5 100644 --- a/internal/service/databaseuser/model_database_user.go +++ b/internal/service/databaseuser/model_database_user.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + 
"go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewMongoDBDatabaseUser(ctx context.Context, statePasswordValue types.String, dbUserModel *TfDatabaseUserModel) (*admin.CloudDatabaseUser, diag.Diagnostics) { diff --git a/internal/service/databaseuser/model_database_user_test.go b/internal/service/databaseuser/model_database_user_test.go index 6033aeb2f1..4ba4f849cb 100644 --- a/internal/service/databaseuser/model_database_user_test.go +++ b/internal/service/databaseuser/model_database_user_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/databaseuser" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var ( diff --git a/internal/service/databaseuser/resource_database_user_migration_test.go b/internal/service/databaseuser/resource_database_user_migration_test.go index 58c706740e..081a6f8212 100644 --- a/internal/service/databaseuser/resource_database_user_migration_test.go +++ b/internal/service/databaseuser/resource_database_user_migration_test.go @@ -3,7 +3,7 @@ package databaseuser_test import ( "testing" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" diff --git a/internal/service/databaseuser/resource_database_user_test.go b/internal/service/databaseuser/resource_database_user_test.go index 9f3c133604..c384b94de3 100644 --- a/internal/service/databaseuser/resource_database_user_test.go +++ b/internal/service/databaseuser/resource_database_user_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/databaseuser" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go index d62db2c0ad..e772c39cf6 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const errorDataLakePipelineRunRead = "error reading MongoDB Atlas DataLake Run (%s): %s" diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go index 93bb7bbe68..ef548c46b9 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const errorDataLakePipelineRunList = "error reading MongoDB Atlas DataLake Runs (%s): %s" diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go index 2dab6da217..41adab2c44 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go +++ 
b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const errorDataLakePipelineList = "error creating MongoDB Atlas DataLake Pipelines: %s" diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go index cb9cbdcd26..9d76b99053 100644 --- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go +++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/encryptionatrest/model_encryption_at_rest.go b/internal/service/encryptionatrest/model_encryption_at_rest.go index eeea1f927f..d52e8ada5b 100644 --- a/internal/service/encryptionatrest/model_encryption_at_rest.go +++ b/internal/service/encryptionatrest/model_encryption_at_rest.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewTfEncryptionAtRestRSModel(ctx context.Context, projectID string, encryptionResp *admin.EncryptionAtRest) *TfEncryptionAtRestRSModel { diff --git a/internal/service/encryptionatrest/model_encryption_at_rest_test.go 
b/internal/service/encryptionatrest/model_encryption_at_rest_test.go index 96ba4f4fc5..e451e85c9c 100644 --- a/internal/service/encryptionatrest/model_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/model_encryption_at_rest_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/encryptionatrest" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var ( diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest.go b/internal/service/encryptionatrest/resource_encryption_at_rest.go index 55dd3a4906..8ba9b7de6b 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest.go @@ -24,7 +24,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/project" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go index 7261469b00..279738d987 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func 
TestMigEncryptionAtRest_basicAWS(t *testing.T) { diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go index 2b8ddbccfd..d44e941a0b 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go @@ -16,8 +16,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20231115014/mockadmin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" ) const ( diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go index 8e54b4d5f0..8a8158e399 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestAccFederatedDatabaseInstanceDS_s3Bucket(t *testing.T) { diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go index b5c2ecefce..327ec41abe 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go +++ 
b/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance.go index c562882835..48b4d2c1ff 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance.go @@ -7,7 +7,7 @@ import ( "net/http" "strings" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/federatedquerylimit/data_source_federated_query_limits.go b/internal/service/federatedquerylimit/data_source_federated_query_limits.go index b69044e2df..20b8257250 100644 --- a/internal/service/federatedquerylimit/data_source_federated_query_limits.go +++ b/internal/service/federatedquerylimit/data_source_federated_query_limits.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedquerylimit/resource_federated_query_limit.go b/internal/service/federatedquerylimit/resource_federated_query_limit.go index 7b6694d515..58ceb1f7d5 100644 --- a/internal/service/federatedquerylimit/resource_federated_query_limit.go +++ 
b/internal/service/federatedquerylimit/resource_federated_query_limit.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go index 4b2a598929..73645c947a 100644 --- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go +++ b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go index 0292f0fd3b..dfddcbcec5 100644 --- a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go +++ b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go @@ -4,7 +4,7 @@ import ( "sort" "strings" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" diff --git a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go 
b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go index cc79469ee5..a4a8f9b261 100644 --- a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go +++ b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/stretchr/testify/assert" diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go index 91c8469cd6..62d6ce0ba4 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func DataSourceSettings() *schema.Resource { diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go index 695ec0c88e..d9a948215f 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() 
*schema.Resource { diff --git a/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go b/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go index f414c3d829..fdc06ffc07 100644 --- a/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go +++ b/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go @@ -4,7 +4,7 @@ import ( "sort" "strings" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type roleMappingsByGroupName []admin.AuthFederationRoleMapping diff --git a/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go b/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go index 51d02623ea..f8371255ff 100644 --- a/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go +++ b/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go b/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go index d010b3b1b1..bd411c53fd 100644 --- a/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go +++ b/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type mRoleAssignment []admin.RoleAssignment diff --git a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go index ec8df70c60..f9e9df91bd 100644 --- a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go +++ b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config.go b/internal/service/globalclusterconfig/resource_global_cluster_config.go index db158960ab..edcbd33111 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration.go b/internal/service/ldapconfiguration/resource_ldap_configuration.go index cc98a943ba..a64c54b400 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration.go +++ 
b/internal/service/ldapconfiguration/resource_ldap_configuration.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/ldapverify/resource_ldap_verify.go b/internal/service/ldapverify/resource_ldap_verify.go index ecd1f5a61a..a8ad9cf9a1 100644 --- a/internal/service/ldapverify/resource_ldap_verify.go +++ b/internal/service/ldapverify/resource_ldap_verify.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/maintenancewindow/resource_maintenance_window.go b/internal/service/maintenancewindow/resource_maintenance_window.go index 49cba64608..85ff7891b6 100644 --- a/internal/service/maintenancewindow/resource_maintenance_window.go +++ b/internal/service/maintenancewindow/resource_maintenance_window.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/networkcontainer/data_source_network_containers.go b/internal/service/networkcontainer/data_source_network_containers.go index dc6ccd4a25..871928b474 100644 --- a/internal/service/networkcontainer/data_source_network_containers.go +++ b/internal/service/networkcontainer/data_source_network_containers.go @@ -8,7 
+8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/networkcontainer/resource_network_container.go b/internal/service/networkcontainer/resource_network_container.go index f8ae639d47..b185391b36 100644 --- a/internal/service/networkcontainer/resource_network_container.go +++ b/internal/service/networkcontainer/resource_network_container.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/networkpeering/data_source_network_peering.go b/internal/service/networkpeering/data_source_network_peering.go index f020874fb2..74ac732407 100644 --- a/internal/service/networkpeering/data_source_network_peering.go +++ b/internal/service/networkpeering/data_source_network_peering.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func DataSource() *schema.Resource { diff --git a/internal/service/networkpeering/data_source_network_peerings.go b/internal/service/networkpeering/data_source_network_peerings.go index 1d722399df..3dc1967aa0 100644 --- a/internal/service/networkpeering/data_source_network_peerings.go +++ b/internal/service/networkpeering/data_source_network_peerings.go @@ -9,7 +9,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/networkpeering/resource_network_peering.go b/internal/service/networkpeering/resource_network_peering.go index f00fd13b89..1058774dd2 100644 --- a/internal/service/networkpeering/resource_network_peering.go +++ b/internal/service/networkpeering/resource_network_peering.go @@ -16,7 +16,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/networkcontainer" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/onlinearchive/resource_online_archive.go b/internal/service/onlinearchive/resource_online_archive.go index b3ab69a7eb..5f9b17b12b 100644 --- a/internal/service/onlinearchive/resource_online_archive.go +++ b/internal/service/onlinearchive/resource_online_archive.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/organization/data_source_organizations.go b/internal/service/organization/data_source_organizations.go index 770c03338c..b1d209ef46 100644 --- a/internal/service/organization/data_source_organizations.go +++ b/internal/service/organization/data_source_organizations.go @@ -5,7 +5,7 @@ import ( "fmt" "log" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + 
"go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/organization/resource_organization.go b/internal/service/organization/resource_organization.go index af728fadbd..6a7c38fc34 100644 --- a/internal/service/organization/resource_organization.go +++ b/internal/service/organization/resource_organization.go @@ -6,7 +6,7 @@ import ( "log" "net/http" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index 21ef373574..22095111ab 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -7,7 +7,7 @@ import ( "regexp" "testing" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" diff --git a/internal/service/orginvitation/resource_org_invitation.go b/internal/service/orginvitation/resource_org_invitation.go index b978dcc1b6..fb64f43946 100644 --- a/internal/service/orginvitation/resource_org_invitation.go +++ b/internal/service/orginvitation/resource_org_invitation.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git 
a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go index 8a8a47e7bd..f3eb5f7c95 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type permCtxKey string diff --git a/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go b/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go index 52543d35db..168bcbe263 100644 --- a/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go +++ b/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go index 5c3de42ceb..729332facc 100644 --- a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go +++ b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + 
"go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go index 721db88114..7c0ccbd942 100644 --- a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go +++ b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go index c8c9e4238b..ac86d494a2 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/datalakepipeline" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const errorPrivateEndpointServiceDataFederationOnlineArchiveList = "error 
reading Private Endpoings for projectId %s: %s" diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go index 5c0fa899ab..70a41e734f 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go b/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go index d979c39252..58bc37361f 100644 --- a/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go +++ b/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go 
b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go index 6c238b442f..ed335d3a18 100644 --- a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go +++ b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/project/data_source_project.go b/internal/service/project/data_source_project.go index 268adf8c41..2cbc63330f 100644 --- a/internal/service/project/data_source_project.go +++ b/internal/service/project/data_source_project.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/datasource" diff --git a/internal/service/project/data_source_projects.go b/internal/service/project/data_source_projects.go index ff09c16f3b..eff493d60c 100644 --- a/internal/service/project/data_source_projects.go +++ b/internal/service/project/data_source_projects.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const projectsDataSourceName = "projects" diff --git a/internal/service/project/model_project.go b/internal/service/project/model_project.go index 7602a85bfd..628bbb687e 100644 --- a/internal/service/project/model_project.go +++ 
b/internal/service/project/model_project.go @@ -3,7 +3,7 @@ package project import ( "context" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" diff --git a/internal/service/project/model_project_test.go b/internal/service/project/model_project_test.go index 735c137a66..81dca2d660 100644 --- a/internal/service/project/model_project_test.go +++ b/internal/service/project/model_project_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" diff --git a/internal/service/project/resource_project.go b/internal/service/project/resource_project.go index 7874cc7c64..1244f0728d 100644 --- a/internal/service/project/resource_project.go +++ b/internal/service/project/resource_project.go @@ -9,7 +9,7 @@ import ( "sort" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/path" diff --git a/internal/service/project/resource_project_migration_test.go b/internal/service/project/resource_project_migration_test.go index 66b8aea30e..ec7ed63e1c 100644 --- a/internal/service/project/resource_project_migration_test.go +++ b/internal/service/project/resource_project_migration_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" diff --git a/internal/service/project/resource_project_test.go b/internal/service/project/resource_project_test.go index 
139873b29d..973c5ec205 100644 --- a/internal/service/project/resource_project_test.go +++ b/internal/service/project/resource_project_test.go @@ -11,8 +11,8 @@ import ( "strings" "testing" - "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20231115014/mockadmin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" diff --git a/internal/service/projectapikey/resource_project_api_key.go b/internal/service/projectapikey/resource_project_api_key.go index 8397e09bf0..518733b8f4 100644 --- a/internal/service/projectapikey/resource_project_api_key.go +++ b/internal/service/projectapikey/resource_project_api_key.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" matlas "go.mongodb.org/atlas/mongodbatlas" ) diff --git a/internal/service/projectinvitation/resource_project_invitation.go b/internal/service/projectinvitation/resource_project_invitation.go index 744d4d4f26..4172d0081e 100644 --- a/internal/service/projectinvitation/resource_project_invitation.go +++ b/internal/service/projectinvitation/resource_project_invitation.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/projectipaccesslist/model_project_ip_access_list.go b/internal/service/projectipaccesslist/model_project_ip_access_list.go index 0d1facee88..a33e77c34f 100644 --- 
a/internal/service/projectipaccesslist/model_project_ip_access_list.go +++ b/internal/service/projectipaccesslist/model_project_ip_access_list.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewMongoDBProjectIPAccessList(projectIPAccessListModel *TfProjectIPAccessListModel) *[]admin.NetworkPermissionEntry { diff --git a/internal/service/projectipaccesslist/model_project_ip_access_list_test.go b/internal/service/projectipaccesslist/model_project_ip_access_list_test.go index 1567020a18..282939b0a6 100644 --- a/internal/service/projectipaccesslist/model_project_ip_access_list_test.go +++ b/internal/service/projectipaccesslist/model_project_ip_access_list_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/projectipaccesslist" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var ( diff --git a/internal/service/projectipaccesslist/resource_project_ip_access_list.go b/internal/service/projectipaccesslist/resource_project_ip_access_list.go index 2027573cbe..07b91ffdfc 100644 --- a/internal/service/projectipaccesslist/resource_project_ip_access_list.go +++ b/internal/service/projectipaccesslist/resource_project_ip_access_list.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" diff --git a/internal/service/pushbasedlogexport/model.go 
b/internal/service/pushbasedlogexport/model.go index 8dabfd31b5..c32f52e514 100644 --- a/internal/service/pushbasedlogexport/model.go +++ b/internal/service/pushbasedlogexport/model.go @@ -3,7 +3,7 @@ package pushbasedlogexport import ( "context" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/diag" diff --git a/internal/service/pushbasedlogexport/model_test.go b/internal/service/pushbasedlogexport/model_test.go index b08f339ff5..10e1678d18 100644 --- a/internal/service/pushbasedlogexport/model_test.go +++ b/internal/service/pushbasedlogexport/model_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/types" diff --git a/internal/service/pushbasedlogexport/resource.go b/internal/service/pushbasedlogexport/resource.go index 5a20ffa19f..aad810d34c 100644 --- a/internal/service/pushbasedlogexport/resource.go +++ b/internal/service/pushbasedlogexport/resource.go @@ -7,7 +7,7 @@ import ( "slices" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" diff --git a/internal/service/pushbasedlogexport/state_transition.go b/internal/service/pushbasedlogexport/state_transition.go index b04d8b43fd..3286736b13 100644 --- a/internal/service/pushbasedlogexport/state_transition.go +++ b/internal/service/pushbasedlogexport/state_transition.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-log/tflog" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/pushbasedlogexport/state_transition_test.go b/internal/service/pushbasedlogexport/state_transition_test.go index 492f826a61..137d774d6e 100644 --- a/internal/service/pushbasedlogexport/state_transition_test.go +++ b/internal/service/pushbasedlogexport/state_transition_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20231115014/mockadmin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" diff --git a/internal/service/searchdeployment/model_search_deployment.go b/internal/service/searchdeployment/model_search_deployment.go index e6c60bdf43..c6f80c5f1f 100644 --- a/internal/service/searchdeployment/model_search_deployment.go +++ b/internal/service/searchdeployment/model_search_deployment.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewSearchDeploymentReq(ctx context.Context, searchDeploymentPlan *TFSearchDeploymentRSModel) admin.ApiSearchDeploymentRequest { diff --git a/internal/service/searchdeployment/model_search_deployment_test.go b/internal/service/searchdeployment/model_search_deployment_test.go index a9d10c3978..643c3dd458 100644 --- a/internal/service/searchdeployment/model_search_deployment_test.go +++ b/internal/service/searchdeployment/model_search_deployment_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" - 
"go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type sdkToTFModelTestCase struct { diff --git a/internal/service/searchdeployment/resource_search_deployment.go b/internal/service/searchdeployment/resource_search_deployment.go index 69a28ad9bc..db4cf8091e 100644 --- a/internal/service/searchdeployment/resource_search_deployment.go +++ b/internal/service/searchdeployment/resource_search_deployment.go @@ -154,7 +154,7 @@ func (r *searchDeploymentRS) Delete(ctx context.Context, req resource.DeleteRequ connV2 := r.Client.AtlasV2 projectID := searchDeploymentState.ProjectID.ValueString() clusterName := searchDeploymentState.ClusterName.ValueString() - if _, err := connV2.AtlasSearchApi.DeleteAtlasSearchDeployment(ctx, projectID, clusterName).Execute(); err != nil { + if _, _, err := connV2.AtlasSearchApi.DeleteAtlasSearchDeployment(ctx, projectID, clusterName).Execute(); err != nil { resp.Diagnostics.AddError("error during search deployment delete", err.Error()) return } diff --git a/internal/service/searchdeployment/state_transition_search_deployment.go b/internal/service/searchdeployment/state_transition_search_deployment.go index 3754c0a77f..ff0ea37ab1 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment.go +++ b/internal/service/searchdeployment/state_transition_search_deployment.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/retrystrategy" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const SearchDeploymentDoesNotExistsError = "ATLAS_FTS_DEPLOYMENT_DOES_NOT_EXIST" diff --git a/internal/service/searchdeployment/state_transition_search_deployment_test.go b/internal/service/searchdeployment/state_transition_search_deployment_test.go index 
12eba44218..ea9b197a50 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment_test.go +++ b/internal/service/searchdeployment/state_transition_search_deployment_test.go @@ -12,8 +12,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20231115014/mockadmin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" ) var ( diff --git a/internal/service/searchindex/data_source_search_index.go b/internal/service/searchindex/data_source_search_index.go index 529a0417dd..3283ff1c8c 100644 --- a/internal/service/searchindex/data_source_search_index.go +++ b/internal/service/searchindex/data_source_search_index.go @@ -123,11 +123,11 @@ func dataSourceMongoDBAtlasSearchIndexRead(ctx context.Context, d *schema.Resour return diag.Errorf("error setting `index_id` for search index (%s): %s", d.Id(), err) } - if err := d.Set("analyzer", searchIndex.Analyzer); err != nil { + if err := d.Set("analyzer", searchIndex.LatestDefinition.Analyzer); err != nil { return diag.Errorf("error setting `analyzer` for search index (%s): %s", d.Id(), err) } - if analyzers := searchIndex.GetAnalyzers(); len(analyzers) > 0 { + if analyzers := searchIndex.LatestDefinition.GetAnalyzers(); len(analyzers) > 0 { searchIndexMappingFields, err := marshalSearchIndex(analyzers) if err != nil { return diag.FromErr(err) @@ -150,21 +150,21 @@ func dataSourceMongoDBAtlasSearchIndexRead(ctx context.Context, d *schema.Resour return diag.Errorf("error setting `name` for search index (%s): %s", d.Id(), err) } - if err := d.Set("search_analyzer", searchIndex.SearchAnalyzer); err != nil { + if err := d.Set("search_analyzer", searchIndex.LatestDefinition.SearchAnalyzer); err != nil { return diag.Errorf("error setting `searchAnalyzer` for search index (%s): 
%s", d.Id(), err) } - if err := d.Set("synonyms", flattenSearchIndexSynonyms(searchIndex.GetSynonyms())); err != nil { + if err := d.Set("synonyms", flattenSearchIndexSynonyms(searchIndex.LatestDefinition.GetSynonyms())); err != nil { return diag.Errorf("error setting `synonyms` for search index (%s): %s", d.Id(), err) } - if searchIndex.Mappings != nil { - if err := d.Set("mappings_dynamic", searchIndex.Mappings.Dynamic); err != nil { + if searchIndex.LatestDefinition.Mappings != nil { + if err := d.Set("mappings_dynamic", searchIndex.LatestDefinition.Mappings.Dynamic); err != nil { return diag.Errorf("error setting `mappings_dynamic` for search index (%s): %s", d.Id(), err) } - if len(searchIndex.Mappings.Fields) > 0 { - searchIndexMappingFields, err := marshalSearchIndex(searchIndex.Mappings.Fields) + if conversion.HasElementsSliceOrMap(searchIndex.LatestDefinition.Mappings.Fields) { + searchIndexMappingFields, err := marshalSearchIndex(searchIndex.LatestDefinition.Mappings.Fields) if err != nil { return diag.FromErr(err) } @@ -174,7 +174,7 @@ func dataSourceMongoDBAtlasSearchIndexRead(ctx context.Context, d *schema.Resour } } - if fields := searchIndex.GetFields(); len(fields) > 0 { + if fields := searchIndex.LatestDefinition.GetFields(); len(fields) > 0 { fieldsMarshaled, err := marshalSearchIndex(fields) if err != nil { return diag.FromErr(err) diff --git a/internal/service/searchindex/data_source_search_indexes.go b/internal/service/searchindex/data_source_search_indexes.go index ed65969d97..63a272af64 100644 --- a/internal/service/searchindex/data_source_search_indexes.go +++ b/internal/service/searchindex/data_source_search_indexes.go @@ -6,8 +6,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { @@ -80,7 +81,7 @@ func dataSourceMongoDBAtlasSearchIndexesRead(ctx context.Context, d *schema.Reso return nil } -func flattenSearchIndexes(searchIndexes []admin.ClusterSearchIndex, projectID, clusterName string) ([]map[string]any, error) { +func flattenSearchIndexes(searchIndexes []admin.SearchIndexResponse, projectID, clusterName string) ([]map[string]any, error) { var searchIndexesMap []map[string]any if len(searchIndexes) == 0 { @@ -92,22 +93,22 @@ func flattenSearchIndexes(searchIndexes []admin.ClusterSearchIndex, projectID, c searchIndexesMap[i] = map[string]any{ "project_id": projectID, "cluster_name": clusterName, - "analyzer": searchIndexes[i].Analyzer, + "analyzer": searchIndexes[i].LatestDefinition.Analyzer, "collection_name": searchIndexes[i].CollectionName, "database": searchIndexes[i].Database, "index_id": searchIndexes[i].IndexID, "name": searchIndexes[i].Name, - "search_analyzer": searchIndexes[i].SearchAnalyzer, + "search_analyzer": searchIndexes[i].LatestDefinition.SearchAnalyzer, "status": searchIndexes[i].Status, - "synonyms": flattenSearchIndexSynonyms(searchIndexes[i].GetSynonyms()), + "synonyms": flattenSearchIndexSynonyms(searchIndexes[i].LatestDefinition.GetSynonyms()), "type": searchIndexes[i].Type, } - if searchIndexes[i].Mappings != nil { - searchIndexesMap[i]["mappings_dynamic"] = searchIndexes[i].Mappings.Dynamic + if searchIndexes[i].LatestDefinition.Mappings != nil { + searchIndexesMap[i]["mappings_dynamic"] = searchIndexes[i].LatestDefinition.Mappings.Dynamic - if len(searchIndexes[i].Mappings.Fields) > 0 { - searchIndexMappingFields, err := marshalSearchIndex(searchIndexes[i].Mappings.Fields) + if conversion.HasElementsSliceOrMap(searchIndexes[i].LatestDefinition.Mappings.Fields) { + searchIndexMappingFields, err := 
marshalSearchIndex(searchIndexes[i].LatestDefinition.Mappings.Fields) if err != nil { return nil, err } @@ -115,7 +116,7 @@ func flattenSearchIndexes(searchIndexes []admin.ClusterSearchIndex, projectID, c } } - if analyzers := searchIndexes[i].GetAnalyzers(); len(analyzers) > 0 { + if analyzers := searchIndexes[i].LatestDefinition.GetAnalyzers(); len(analyzers) > 0 { searchIndexAnalyzers, err := marshalSearchIndex(analyzers) if err != nil { return nil, err @@ -123,7 +124,7 @@ func flattenSearchIndexes(searchIndexes []admin.ClusterSearchIndex, projectID, c searchIndexesMap[i]["analyzers"] = searchIndexAnalyzers } - if fields := searchIndexes[i].GetFields(); len(fields) > 0 { + if fields := searchIndexes[i].LatestDefinition.GetFields(); len(fields) > 0 { fieldsMarshaled, err := marshalSearchIndex(fields) if err != nil { return nil, err diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index 36655a99c3..72f0d0b783 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( @@ -187,34 +187,34 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
projectID := ids["project_id"] clusterName := ids["cluster_name"] indexID := ids["index_id"] + indexName := d.Get("name").(string) - searchIndex, _, err := connV2.AtlasSearchApi.GetAtlasSearchIndex(ctx, projectID, clusterName, indexID).Execute() + if d.HasChange("name") || d.HasChange("type") || d.HasChange("database") || d.HasChange("collection_name") { + return diag.Errorf("error updating search index (%s): attributes name, type, database and collection_name can't be updated", indexName) + } + + searchRead, _, err := connV2.AtlasSearchApi.GetAtlasSearchIndex(ctx, projectID, clusterName, indexID).Execute() if err != nil { return diag.Errorf("error getting search index information: %s", err) } - - if d.HasChange("type") { - searchIndex.Type = conversion.StringPtr(d.Get("type").(string)) + searchIndex := &admin.SearchIndexUpdateRequest{ + Definition: admin.SearchIndexUpdateRequestDefinition{ + Analyzer: searchRead.LatestDefinition.Analyzer, + Analyzers: searchRead.LatestDefinition.Analyzers, + Mappings: searchRead.LatestDefinition.Mappings, + SearchAnalyzer: searchRead.LatestDefinition.SearchAnalyzer, + StoredSource: searchRead.LatestDefinition.StoredSource, + Synonyms: searchRead.LatestDefinition.Synonyms, + Fields: searchRead.LatestDefinition.Fields, + }, } if d.HasChange("analyzer") { - searchIndex.Analyzer = conversion.StringPtr(d.Get("analyzer").(string)) - } - - if d.HasChange("collection_name") { - searchIndex.CollectionName = d.Get("collection_name").(string) - } - - if d.HasChange("database") { - searchIndex.Database = d.Get("database").(string) - } - - if d.HasChange("name") { - searchIndex.Name = d.Get("name").(string) + searchIndex.Definition.Analyzer = conversion.StringPtr(d.Get("analyzer").(string)) } if d.HasChange("search_analyzer") { - searchIndex.SearchAnalyzer = conversion.StringPtr(d.Get("search_analyzer").(string)) + searchIndex.Definition.SearchAnalyzer = conversion.StringPtr(d.Get("search_analyzer").(string)) } if d.HasChange("analyzers") { @@ 
-222,15 +222,15 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if err != nil { return err } - searchIndex.Analyzers = &analyzers + searchIndex.Definition.Analyzers = &analyzers } if d.HasChange("mappings_dynamic") { dynamic := d.Get("mappings_dynamic").(bool) - if searchIndex.Mappings == nil { - searchIndex.Mappings = &admin.ApiAtlasFTSMappings{} + if searchIndex.Definition.Mappings == nil { + searchIndex.Definition.Mappings = &admin.SearchMappings{} } - searchIndex.Mappings.Dynamic = &dynamic + searchIndex.Definition.Mappings.Dynamic = &dynamic } if d.HasChange("mappings_fields") { @@ -238,10 +238,10 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if err != nil { return err } - if searchIndex.Mappings == nil { - searchIndex.Mappings = &admin.ApiAtlasFTSMappings{} + if searchIndex.Definition.Mappings == nil { + searchIndex.Definition.Mappings = &admin.SearchMappings{} } - searchIndex.Mappings.Fields = mappingsFields + searchIndex.Definition.Mappings.Fields = mappingsFields } if d.HasChange("fields") { @@ -249,17 +249,16 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if err != nil { return err } - searchIndex.Fields = &fields + searchIndex.Definition.Fields = conversion.ToAnySlicePointer(&fields) } if d.HasChange("synonyms") { synonyms := expandSearchIndexSynonyms(d) - searchIndex.Synonyms = &synonyms + searchIndex.Definition.Synonyms = &synonyms } - searchIndex.IndexID = conversion.StringPtr("") if _, _, err := connV2.AtlasSearchApi.UpdateAtlasSearchIndex(ctx, projectID, clusterName, indexID, searchIndex).Execute(); err != nil { - return diag.Errorf("error updating search index (%s): %s", searchIndex.Name, err) + return diag.Errorf("error updating search index (%s): %s", indexName, err) } if d.Get("wait_for_index_build_completion").(bool) { @@ -274,8 +273,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} // Wait, catching any errors - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { + if _, err := stateConf.WaitForStateContext(ctx); err != nil { d.SetId(conversion.EncodeStateID(map[string]string{ "project_id": projectID, "cluster_name": clusterName, @@ -313,11 +311,11 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.Errorf("error setting `type` for search index (%s): %s", d.Id(), err) } - if err := d.Set("analyzer", searchIndex.Analyzer); err != nil { + if err := d.Set("analyzer", searchIndex.LatestDefinition.Analyzer); err != nil { return diag.Errorf("error setting `analyzer` for search index (%s): %s", d.Id(), err) } - if analyzers := searchIndex.GetAnalyzers(); len(analyzers) > 0 { + if analyzers := searchIndex.LatestDefinition.GetAnalyzers(); len(analyzers) > 0 { searchIndexMappingFields, err := marshalSearchIndex(analyzers) if err != nil { return diag.FromErr(err) @@ -339,21 +337,21 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.Errorf("error setting `name` for search index (%s): %s", d.Id(), err) } - if err := d.Set("search_analyzer", searchIndex.SearchAnalyzer); err != nil { + if err := d.Set("search_analyzer", searchIndex.LatestDefinition.SearchAnalyzer); err != nil { return diag.Errorf("error setting `searchAnalyzer` for search index (%s): %s", d.Id(), err) } - if err := d.Set("synonyms", flattenSearchIndexSynonyms(searchIndex.GetSynonyms())); err != nil { + if err := d.Set("synonyms", flattenSearchIndexSynonyms(searchIndex.LatestDefinition.GetSynonyms())); err != nil { return diag.Errorf("error setting `synonyms` for search index (%s): %s", d.Id(), err) } - if searchIndex.Mappings != nil { - if err := d.Set("mappings_dynamic", searchIndex.Mappings.Dynamic); err != nil { + if searchIndex.LatestDefinition.Mappings != nil { + if err := d.Set("mappings_dynamic", searchIndex.LatestDefinition.Mappings.Dynamic); err != nil { return diag.Errorf("error 
setting `mappings_dynamic` for search index (%s): %s", d.Id(), err) } - if len(searchIndex.Mappings.Fields) > 0 { - searchIndexMappingFields, err := marshalSearchIndex(searchIndex.Mappings.Fields) + if conversion.HasElementsSliceOrMap(searchIndex.LatestDefinition.Mappings.Fields) { + searchIndexMappingFields, err := marshalSearchIndex(searchIndex.LatestDefinition.Mappings.Fields) if err != nil { return diag.FromErr(err) } @@ -363,7 +361,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } } - if fields := searchIndex.GetFields(); len(fields) > 0 { + if fields := searchIndex.LatestDefinition.GetFields(); len(fields) > 0 { fieldsMarshaled, err := marshalSearchIndex(fields) if err != nil { return diag.FromErr(err) @@ -398,13 +396,15 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) indexType := d.Get("type").(string) - searchIndexRequest := &admin.ClusterSearchIndex{ + searchIndexRequest := &admin.SearchIndexCreateRequest{ Type: conversion.StringPtr(indexType), - Analyzer: conversion.StringPtr(d.Get("analyzer").(string)), CollectionName: d.Get("collection_name").(string), Database: d.Get("database").(string), Name: d.Get("name").(string), - SearchAnalyzer: conversion.StringPtr(d.Get("search_analyzer").(string)), + Definition: &admin.BaseSearchIndexCreateRequestDefinition{ + Analyzer: conversion.StringPtr(d.Get("analyzer").(string)), + SearchAnalyzer: conversion.StringPtr(d.Get("search_analyzer").(string)), + }, } if indexType == vectorSearch { @@ -412,24 +412,24 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
if err != nil { return err } - searchIndexRequest.Fields = &fields + searchIndexRequest.Definition.Fields = conversion.ToAnySlicePointer(&fields) } else { analyzers, err := unmarshalSearchIndexAnalyzersFields(d.Get("analyzers").(string)) if err != nil { return err } - searchIndexRequest.Analyzers = &analyzers + searchIndexRequest.Definition.Analyzers = &analyzers mappingsFields, err := unmarshalSearchIndexMappingFields(d.Get("mappings_fields").(string)) if err != nil { return err } dynamic := d.Get("mappings_dynamic").(bool) - searchIndexRequest.Mappings = &admin.ApiAtlasFTSMappings{ + searchIndexRequest.Definition.Mappings = &admin.SearchMappings{ Dynamic: &dynamic, Fields: mappingsFields, } synonyms := expandSearchIndexSynonyms(d) - searchIndexRequest.Synonyms = &synonyms + searchIndexRequest.Definition.Synonyms = &synonyms } dbSearchIndexRes, _, err := connV2.AtlasSearchApi.CreateAtlasSearchIndex(ctx, projectID, clusterName, searchIndexRequest).Execute() @@ -561,8 +561,8 @@ func unmarshalSearchIndexFields(str string) ([]map[string]any, diag.Diagnostics) return fields, nil } -func unmarshalSearchIndexAnalyzersFields(str string) ([]admin.ApiAtlasFTSAnalyzers, diag.Diagnostics) { - fields := []admin.ApiAtlasFTSAnalyzers{} +func unmarshalSearchIndexAnalyzersFields(str string) ([]admin.AtlasSearchAnalyzer, diag.Diagnostics) { + fields := []admin.AtlasSearchAnalyzer{} if str == "" { return fields, nil } diff --git a/internal/service/serverlessinstance/data_source_serverless_instances.go b/internal/service/serverlessinstance/data_source_serverless_instances.go index 23f8eb5935..a55498593a 100644 --- a/internal/service/serverlessinstance/data_source_serverless_instances.go +++ b/internal/service/serverlessinstance/data_source_serverless_instances.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/serverlessinstance/resource_serverless_instance.go b/internal/service/serverlessinstance/resource_serverless_instance.go index 4c24a1cf3e..2f7a525db2 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance.go +++ b/internal/service/serverlessinstance/resource_serverless_instance.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/serverlessinstance/resource_serverless_instance_test.go b/internal/service/serverlessinstance/resource_serverless_instance_test.go index d6b3fd9447..c7602623d9 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance_test.go +++ b/internal/service/serverlessinstance/resource_serverless_instance_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go b/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go index 8215f7d0d3..ac5219b683 100644 --- a/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go +++ b/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go @@ -4,7 +4,7 @@ import ( 
"context" "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/sharedtier/data_source_shared_tier_snapshots.go b/internal/service/sharedtier/data_source_shared_tier_snapshots.go index 881ec90a53..ff83218e5e 100644 --- a/internal/service/sharedtier/data_source_shared_tier_snapshots.go +++ b/internal/service/sharedtier/data_source_shared_tier_snapshots.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/streamconnection/data_source_stream_connections.go b/internal/service/streamconnection/data_source_stream_connections.go index 9beeefdff8..5b4835dd4b 100644 --- a/internal/service/streamconnection/data_source_stream_connections.go +++ b/internal/service/streamconnection/data_source_stream_connections.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var _ datasource.DataSource = &streamConnectionsDS{} diff --git a/internal/service/streamconnection/data_source_stream_connections_test.go b/internal/service/streamconnection/data_source_stream_connections_test.go index cdf5c80875..ca480ae389 100644 --- a/internal/service/streamconnection/data_source_stream_connections_test.go +++ b/internal/service/streamconnection/data_source_stream_connections_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestAccStreamDSStreamConnections_basic(t *testing.T) { diff --git a/internal/service/streamconnection/model_stream_connection.go b/internal/service/streamconnection/model_stream_connection.go index 65225d826f..0c2a0ece7d 100644 --- a/internal/service/streamconnection/model_stream_connection.go +++ b/internal/service/streamconnection/model_stream_connection.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewStreamConnectionReq(ctx context.Context, plan *TFStreamConnectionModel) (*admin.StreamsConnection, diag.Diagnostics) { diff --git a/internal/service/streamconnection/model_stream_connection_test.go b/internal/service/streamconnection/model_stream_connection_test.go index 9407f940f7..16ef34747d 100644 --- a/internal/service/streamconnection/model_stream_connection_test.go +++ b/internal/service/streamconnection/model_stream_connection_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamconnection" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/streaminstance/data_source_stream_instances.go b/internal/service/streaminstance/data_source_stream_instances.go index 8275861ac1..b2cff18b7b 100644 --- a/internal/service/streaminstance/data_source_stream_instances.go +++ b/internal/service/streaminstance/data_source_stream_instances.go @@ -10,7 +10,7 @@ import ( 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) var _ datasource.DataSource = &streamInstancesDS{} diff --git a/internal/service/streaminstance/data_source_stream_instances_test.go b/internal/service/streaminstance/data_source_stream_instances_test.go index ac4cc04717..9ea31f3118 100644 --- a/internal/service/streaminstance/data_source_stream_instances_test.go +++ b/internal/service/streaminstance/data_source_stream_instances_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func TestAccStreamDSStreamInstances_basic(t *testing.T) { diff --git a/internal/service/streaminstance/model_stream_instance.go b/internal/service/streaminstance/model_stream_instance.go index 1bc4ab8822..a50a3253ec 100644 --- a/internal/service/streaminstance/model_stream_instance.go +++ b/internal/service/streaminstance/model_stream_instance.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func NewStreamInstanceCreateReq(ctx context.Context, plan *TFStreamInstanceModel) (*admin.StreamsTenant, diag.Diagnostics) { diff --git a/internal/service/streaminstance/model_stream_instance_test.go b/internal/service/streaminstance/model_stream_instance_test.go index f932ef2aae..126baeb093 100644 --- 
a/internal/service/streaminstance/model_stream_instance_test.go +++ b/internal/service/streaminstance/model_stream_instance_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streaminstance" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/team/data_source_team.go b/internal/service/team/data_source_team.go index 52f52a8041..99017170f2 100644 --- a/internal/service/team/data_source_team.go +++ b/internal/service/team/data_source_team.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func DataSource() *schema.Resource { diff --git a/internal/service/team/resource_team.go b/internal/service/team/resource_team.go index f170e5ed29..a9c423e629 100644 --- a/internal/service/team/resource_team.go +++ b/internal/service/team/resource_team.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/service/thirdpartyintegration/data_source_third_party_integrations.go b/internal/service/thirdpartyintegration/data_source_third_party_integrations.go index 668cd6b269..daf79ed180 100644 --- a/internal/service/thirdpartyintegration/data_source_third_party_integrations.go +++ b/internal/service/thirdpartyintegration/data_source_third_party_integrations.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go index 786a179ec0..7b734cb61c 100644 --- a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go +++ b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/testutil/acc/atlas.go b/internal/testutil/acc/atlas.go index 539989cc6b..b2bcfe010b 100644 --- a/internal/testutil/acc/atlas.go +++ b/internal/testutil/acc/atlas.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func createProject(tb testing.TB, name string) string { diff --git a/internal/testutil/acc/database_user.go b/internal/testutil/acc/database_user.go index 7e7b3bbb39..7710bb1333 100644 --- a/internal/testutil/acc/database_user.go +++ b/internal/testutil/acc/database_user.go @@ -3,7 +3,7 @@ package acc import ( "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func 
ConfigDatabaseUserBasic(projectID, username, roleName, keyLabel, valueLabel string) string { diff --git a/internal/testutil/acc/factory.go b/internal/testutil/acc/factory.go index 531ad26511..80b3fb63ea 100644 --- a/internal/testutil/acc/factory.go +++ b/internal/testutil/acc/factory.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/provider" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const ( diff --git a/internal/testutil/acc/project.go b/internal/testutil/acc/project.go index b28699f069..46e9bd01b7 100644 --- a/internal/testutil/acc/project.go +++ b/internal/testutil/acc/project.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func CheckDestroyProject(s *terraform.State) error { diff --git a/internal/testutil/acc/serverless.go b/internal/testutil/acc/serverless.go index 6a6970a967..d9c6501970 100644 --- a/internal/testutil/acc/serverless.go +++ b/internal/testutil/acc/serverless.go @@ -3,7 +3,7 @@ package acc import ( "fmt" - "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func ConfigServerlessInstance(projectID, name string, ignoreConnectionStrings bool, autoIndexing *bool, tags []admin.ResourceTag) string { From 2b82c1a6fb5ae328b61c843ea364966616b6ed1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Jul 2024 07:35:08 +0200 Subject: [PATCH 11/84] chore: Bump github.com/aws/aws-sdk-go from 1.54.8 to 1.54.13 (#2383) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.54.8 to 1.54.13. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.8...v1.54.13) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 88567f43e9..f43688becb 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 - github.com/aws/aws-sdk-go v1.54.8 + github.com/aws/aws-sdk-go v1.54.13 github.com/go-test/deep v1.1.1 github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 diff --git a/go.sum b/go.sum index c42a947457..0926edc4cc 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.54.8 h1:+soIjaRsuXfEJ9ts9poJD2fIIzSSRwfx+T69DrTtL2M= -github.com/aws/aws-sdk-go v1.54.8/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.13 h1:zpCuiG+/mFdDY/klKJvmSioAZWk45F4rLGq0JWVAAzk= +github.com/aws/aws-sdk-go v1.54.13/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 
h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= From 15143f06488bef3a9a0c2f208db57c6abd380325 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Jul 2024 07:35:27 +0200 Subject: [PATCH 12/84] chore: Bump amannn/action-semantic-pull-request from 5.5.2 to 5.5.3 (#2382) Bumps [amannn/action-semantic-pull-request](https://github.com/amannn/action-semantic-pull-request) from 5.5.2 to 5.5.3. - [Release notes](https://github.com/amannn/action-semantic-pull-request/releases) - [Changelog](https://github.com/amannn/action-semantic-pull-request/blob/main/CHANGELOG.md) - [Commits](https://github.com/amannn/action-semantic-pull-request/compare/cfb60706e18bc85e8aec535e3c577abe8f70378e...0723387faaf9b38adef4775cd42cfd5155ed6017) --- updated-dependencies: - dependency-name: amannn/action-semantic-pull-request dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/pull-request-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull-request-lint.yml b/.github/workflows/pull-request-lint.yml index 9acb89bf44..b7746d7a9c 100644 --- a/.github/workflows/pull-request-lint.yml +++ b/.github/workflows/pull-request-lint.yml @@ -17,7 +17,7 @@ jobs: permissions: pull-requests: write # Needed by sticky-pull-request-comment steps: - - uses: amannn/action-semantic-pull-request@cfb60706e18bc85e8aec535e3c577abe8f70378e + - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 id: lint_pr_title env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From d6ac0c87f520c525d2b8e4e564727044c5b97f2e Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Wed, 3 Jul 2024 14:58:51 +0200 Subject: [PATCH 13/84] test: Improves tests for mongodbatlas_search_index (#2384) * checkVector * 
checkBasic * checkWithMapping * checkWithSynonyms * checkAdditional * checkAdditionalAnalyzers and checkAdditionalMappingsFields * remove addAttrChecks and addAttrSetChecks * use commonChecks in all checks * test checks cleanup --- .../searchindex/resource_search_index_test.go | 215 +++++++++--------- 1 file changed, 111 insertions(+), 104 deletions(-) diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go index dc0cc3c0ac..d0edb9cc84 100644 --- a/internal/service/searchindex/resource_search_index_test.go +++ b/internal/service/searchindex/resource_search_index_test.go @@ -20,18 +20,15 @@ func TestAccSearchIndex_withSearchType(t *testing.T) { projectID, clusterName = acc.ClusterNameExecution(t) indexName = acc.RandomName() databaseName = acc.RandomName() - indexType = "search" - mappingsDynamic = "true" ) - checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroySearchIndex, Steps: []resource.TestStep{ { - Config: configBasic(projectID, indexName, databaseName, clusterName, true), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: configBasic(projectID, clusterName, indexName, "search", databaseName), + Check: checkBasic(projectID, clusterName, indexName, "search", databaseName), }, }, }) @@ -42,11 +39,7 @@ func TestAccSearchIndex_withMapping(t *testing.T) { projectID, clusterName = acc.ClusterNameExecution(t) indexName = acc.RandomName() databaseName = acc.RandomName() - indexType = "" - mappingsDynamic = "false" ) - checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName) - checks = addAttrSetChecks(checks, "mappings_fields", "analyzers") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, 
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, @@ -54,7 +47,7 @@ func TestAccSearchIndex_withMapping(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithMapping(projectID, indexName, databaseName, clusterName), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Check: checkWithMapping(projectID, indexName, databaseName, clusterName), }, }, }) @@ -65,18 +58,7 @@ func TestAccSearchIndex_withSynonyms(t *testing.T) { projectID, clusterName = acc.ClusterNameExecution(t) indexName = acc.RandomName() databaseName = acc.RandomName() - indexType = "" - mappingsDynamic = "true" - mapChecks = map[string]string{ - "synonyms.#": "1", - "synonyms.0.analyzer": "lucene.simple", - "synonyms.0.name": "synonym_test", - "synonyms.0.source_collection": collectionName, - } ) - checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName) - checks = addAttrChecks(checks, mapChecks) - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, @@ -84,7 +66,7 @@ func TestAccSearchIndex_withSynonyms(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, with), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Check: checkWithSynonyms(projectID, indexName, databaseName, clusterName, with), }, }, }) @@ -95,17 +77,7 @@ func TestAccSearchIndex_updatedToEmptySynonyms(t *testing.T) { projectID, clusterName = acc.ClusterNameExecution(t) indexName = acc.RandomName() databaseName = acc.RandomName() - indexType = "" - mappingsDynamic = "true" ) - checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName) - checks1 := addAttrChecks(checks, map[string]string{ - "synonyms.#": "1", - "synonyms.0.analyzer": "lucene.simple", - "synonyms.0.name": "synonym_test", - "synonyms.0.source_collection": collectionName, - }) - checks2 := addAttrChecks(checks, 
map[string]string{"synonyms.#": "0"}) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, @@ -113,11 +85,11 @@ func TestAccSearchIndex_updatedToEmptySynonyms(t *testing.T) { Steps: []resource.TestStep{ { Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, with), - Check: resource.ComposeAggregateTestCheckFunc(checks1...), + Check: checkWithSynonyms(projectID, indexName, databaseName, clusterName, with), }, { Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, without), - Check: resource.ComposeAggregateTestCheckFunc(checks2...), + Check: checkWithSynonyms(projectID, indexName, databaseName, clusterName, without), }, }, }) @@ -136,17 +108,11 @@ func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) { Steps: []resource.TestStep{ { Config: configAdditional(projectID, indexName, databaseName, clusterName, analyzersTF), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttrWith(resourceName, "analyzers", acc.JSONEquals(analyzersJSON)), - ), + Check: checkAdditionalAnalyzers(projectID, indexName, databaseName, clusterName, true), }, { Config: configAdditional(projectID, indexName, databaseName, clusterName, ""), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "analyzers", ""), - ), + Check: checkAdditionalAnalyzers(projectID, indexName, databaseName, clusterName, false), }, }, }) @@ -165,17 +131,11 @@ func TestAccSearchIndex_updatedToEmptyMappingsFields(t *testing.T) { Steps: []resource.TestStep{ { Config: configAdditional(projectID, indexName, databaseName, clusterName, mappingsFieldsTF), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttrWith(resourceName, "mappings_fields", acc.JSONEquals(mappingsFieldsJSON)), - ), + Check: 
checkAdditionalMappingsFields(projectID, indexName, databaseName, clusterName, true), }, { Config: configAdditional(projectID, indexName, databaseName, clusterName, ""), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "mappings_fields", ""), - ), + Check: checkAdditionalMappingsFields(projectID, indexName, databaseName, clusterName, false), }, }, }) @@ -191,22 +151,18 @@ func basicTestCase(tb testing.TB) *resource.TestCase { projectID, clusterName = acc.ClusterNameExecution(tb) indexName = acc.RandomName() databaseName = acc.RandomName() - indexType = "" - mappingsDynamic = "true" ) - checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName) - return &resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(tb) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroySearchIndex, Steps: []resource.TestStep{ { - Config: configBasic(projectID, indexName, databaseName, clusterName, false), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: configBasic(projectID, clusterName, indexName, "", databaseName), + Check: checkBasic(projectID, clusterName, indexName, "", databaseName), }, { - Config: configBasic(projectID, indexName, databaseName, clusterName, false), + Config: configBasic(projectID, clusterName, indexName, "", databaseName), ResourceName: resourceName, ImportStateIdFunc: importStateIDFunc(resourceName), ImportState: true, @@ -221,21 +177,8 @@ func basicVectorTestCase(tb testing.TB) *resource.TestCase { var ( projectID, clusterName = acc.ClusterNameExecution(tb) indexName = acc.RandomName() - indexType = "vectorSearch" databaseName = acc.RandomName() - attributes = map[string]string{ - "name": indexName, - "cluster_name": clusterName, - "database": databaseName, - "collection_name": collectionName, - "type": indexType, - } ) - checks := addAttrChecks(nil, attributes) - checks = 
acc.AddAttrSetChecks(resourceName, checks, "project_id") - checks = acc.AddAttrSetChecks(datasourceName, checks, "project_id", "index_id") - checks = append(checks, resource.TestCheckResourceAttrWith(datasourceName, "fields", acc.JSONEquals(fieldsJSON))) - return &resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(tb) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, @@ -243,34 +186,33 @@ func basicVectorTestCase(tb testing.TB) *resource.TestCase { Steps: []resource.TestStep{ { Config: configVector(projectID, indexName, databaseName, clusterName), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Check: checkVector(projectID, indexName, databaseName, clusterName), }, }, } } -func commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName string) []resource.TestCheckFunc { +func checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic string, extra ...resource.TestCheckFunc) resource.TestCheckFunc { attributes := map[string]string{ - "name": indexName, - "cluster_name": clusterName, - "database": databaseName, - "collection_name": collectionName, - "type": indexType, - "mappings_dynamic": mappingsDynamic, + "project_id": projectID, + "cluster_name": clusterName, + "name": indexName, + "type": indexType, + "database": databaseName, + "collection_name": collectionName, } - checks := addAttrChecks(nil, attributes) - checks = acc.AddAttrSetChecks(resourceName, checks, "project_id") - return acc.AddAttrSetChecks(datasourceName, checks, "project_id", "index_id") -} - -func addAttrChecks(checks []resource.TestCheckFunc, mapChecks map[string]string) []resource.TestCheckFunc { - checks = acc.AddAttrChecks(resourceName, checks, mapChecks) - return acc.AddAttrChecks(datasourceName, checks, mapChecks) -} - -func addAttrSetChecks(checks []resource.TestCheckFunc, attrNames ...string) []resource.TestCheckFunc { - checks = acc.AddAttrSetChecks(resourceName, checks, attrNames...) 
- return acc.AddAttrSetChecks(datasourceName, checks, attrNames...) + if indexType != "vectorSearch" { + attributes["mappings_dynamic"] = mappingsDynamic + } + checks := []resource.TestCheckFunc{ + checkExists(resourceName), + } + checks = acc.AddAttrChecks(resourceName, checks, attributes) + checks = acc.AddAttrChecks(datasourceName, checks, attributes) + checks = acc.AddAttrSetChecks(resourceName, checks, "index_id") + checks = acc.AddAttrSetChecks(datasourceName, checks, "index_id") + checks = append(checks, extra...) + return resource.ComposeAggregateTestCheckFunc(checks...) } func checkExists(resourceName string) resource.TestCheckFunc { @@ -291,10 +233,10 @@ func checkExists(resourceName string) resource.TestCheckFunc { } } -func configBasic(projectID, indexName, databaseName, clusterName string, explicitType bool) string { - var indexType string - if explicitType { - indexType = `type="search"` +func configBasic(projectID, clusterName, indexName, indexType, databaseName string) string { + var indexTypeStr string + if indexType != "" { + indexTypeStr = fmt.Sprintf("type=%q", indexType) } return fmt.Sprintf(` resource "mongodbatlas_search_index" "test" { @@ -309,11 +251,16 @@ func configBasic(projectID, indexName, databaseName, clusterName string, explici } data "mongodbatlas_search_index" "data_index" { - cluster_name = %[1]q - project_id = %[2]q + cluster_name = mongodbatlas_search_index.test.cluster_name + project_id = mongodbatlas_search_index.test.project_id index_id = mongodbatlas_search_index.test.index_id } - `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, indexType) + `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, indexTypeStr) +} + +func checkBasic(projectID, clusterName, indexName, indexType, databaseName string) resource.TestCheckFunc { + mappingsDynamic := "true" + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic) } func 
configWithMapping(projectID, indexName, databaseName, clusterName string) string { @@ -331,13 +278,22 @@ func configWithMapping(projectID, indexName, databaseName, clusterName string) s } data "mongodbatlas_search_index" "data_index" { - cluster_name = %[1]q - project_id = %[2]q + cluster_name = mongodbatlas_search_index.test.cluster_name + project_id = mongodbatlas_search_index.test.project_id index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, analyzersTF, mappingsFieldsTF) } +func checkWithMapping(projectID, indexName, databaseName, clusterName string) resource.TestCheckFunc { + indexType := "" + mappingsDynamic := "false" + attrNames := []string{"mappings_fields", "analyzers"} + checks := acc.AddAttrSetChecks(resourceName, nil, attrNames...) + checks = acc.AddAttrSetChecks(datasourceName, checks, attrNames...) + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic, checks...) 
+} + func configWithSynonyms(projectID, indexName, databaseName, clusterName string, has bool) string { var synonymsStr string if has { @@ -363,13 +319,30 @@ func configWithSynonyms(projectID, indexName, databaseName, clusterName string, } data "mongodbatlas_search_index" "data_index" { - cluster_name = %[1]q - project_id = %[2]q + cluster_name = mongodbatlas_search_index.test.cluster_name + project_id = mongodbatlas_search_index.test.project_id index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, synonymsStr) } +func checkWithSynonyms(projectID, indexName, databaseName, clusterName string, has bool) resource.TestCheckFunc { + indexType := "" + mappingsDynamic := "true" + attrs := map[string]string{"synonyms.#": "0"} + if has { + attrs = map[string]string{ + "synonyms.#": "1", + "synonyms.0.analyzer": "lucene.simple", + "synonyms.0.name": "synonym_test", + "synonyms.0.source_collection": collectionName, + } + } + checks := acc.AddAttrChecks(resourceName, nil, attrs) + checks = acc.AddAttrChecks(datasourceName, checks, attrs) + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic, checks...) 
+} + func configAdditional(projectID, indexName, databaseName, clusterName, additional string) string { return fmt.Sprintf(` resource "mongodbatlas_search_index" "test" { @@ -382,9 +355,35 @@ func configAdditional(projectID, indexName, databaseName, clusterName, additiona mappings_dynamic = true %[7]s } + + data "mongodbatlas_search_index" "data_index" { + cluster_name = mongodbatlas_search_index.test.cluster_name + project_id = mongodbatlas_search_index.test.project_id + index_id = mongodbatlas_search_index.test.index_id + } `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, additional) } +func checkAdditionalAnalyzers(projectID, indexName, databaseName, clusterName string, has bool) resource.TestCheckFunc { + indexType := "" + mappingsDynamic := "true" + check := resource.TestCheckResourceAttr(resourceName, "analyzers", "") + if has { + check = resource.TestCheckResourceAttrWith(resourceName, "analyzers", acc.JSONEquals(analyzersJSON)) + } + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic, check) +} + +func checkAdditionalMappingsFields(projectID, indexName, databaseName, clusterName string, has bool) resource.TestCheckFunc { + indexType := "" + mappingsDynamic := "true" + check := resource.TestCheckResourceAttr(resourceName, "mappings_fields", "") + if has { + check = resource.TestCheckResourceAttrWith(resourceName, "mappings_fields", acc.JSONEquals(mappingsFieldsJSON)) + } + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic, check) +} + func configVector(projectID, indexName, databaseName, clusterName string) string { return fmt.Sprintf(` resource "mongodbatlas_search_index" "test" { @@ -402,13 +401,21 @@ func configVector(projectID, indexName, databaseName, clusterName string) string } data "mongodbatlas_search_index" "data_index" { - cluster_name = %[1]q - project_id = %[2]q + cluster_name = mongodbatlas_search_index.test.cluster_name + 
project_id = mongodbatlas_search_index.test.project_id index_id = mongodbatlas_search_index.test.index_id } `, clusterName, projectID, indexName, databaseName, collectionName, fieldsJSON) } +func checkVector(projectID, indexName, databaseName, clusterName string) resource.TestCheckFunc { + indexType := "vectorSearch" + mappingsDynamic := "true" + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic, + resource.TestCheckResourceAttrWith(resourceName, "fields", acc.JSONEquals(fieldsJSON)), + resource.TestCheckResourceAttrWith(datasourceName, "fields", acc.JSONEquals(fieldsJSON))) +} + func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { rs, ok := s.RootModule().Resources[resourceName] From 789f38a31b410c196d2196fa9f29fcd4aaa02faf Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Wed, 3 Jul 2024 16:12:30 +0200 Subject: [PATCH 14/84] chore: Updates nightly tests to TF 1.9.x (#2386) * update nightly tests to TF 1.9.x * use TF var * keep until 1.3.x * Update .github/workflows/update_tf_compatibility_matrix.yml Co-authored-by: maastha <122359335+maastha@users.noreply.github.com> --------- Co-authored-by: maastha <122359335+maastha@users.noreply.github.com> --- .github/workflows/test-suite.yml | 4 ++-- .github/workflows/update_tf_compatibility_matrix.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 7a7bf651c1..ff25bb843d 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -6,7 +6,7 @@ on: inputs: terraform_matrix: description: 'Terraform version matrix (JSON array)' - default: '["1.8.x", "1.7.x", "1.6.x", "1.5.x", "1.4.x", "1.3.x", "1.2.x"]' + default: '["1.9.x", "1.8.x", "1.7.x", "1.6.x", "1.5.x", "1.4.x", "1.3.x"]' provider_matrix: description: 'Previous MongoDB Atlas Provider version matrix 
for migration tests (JSON array)' default: '[""]' # "" for latest version @@ -42,7 +42,7 @@ concurrency: jobs: versions: env: - schedule_terraform_matrix: '["1.8.x"]' + schedule_terraform_matrix: '["${{ vars.TF_VERSION_LATEST }}"]' schedule_provider_matrix: '[""]' # "" for latest version runs-on: ubuntu-latest outputs: diff --git a/.github/workflows/update_tf_compatibility_matrix.yml b/.github/workflows/update_tf_compatibility_matrix.yml index 7af1303001..ffa81d48e9 100644 --- a/.github/workflows/update_tf_compatibility_matrix.yml +++ b/.github/workflows/update_tf_compatibility_matrix.yml @@ -30,4 +30,4 @@ jobs: commit-message: "doc: Updates Terraform Compatibility Matrix documentation" delete-branch: true branch: terraform-compatibility-matrix-update - body: "Automatic updates for Terraform Compatibility Matrix documentation. **Action Required**: Update .tools-version file and TF_VERSION_LATEST GitHub environment variable if needed." + body: "Automatic updates for Terraform Compatibility Matrix documentation. **Action Required**: Update test-suite.yml, .tools-version files, and TF_VERSION_LATEST GitHub environment variable if needed." 
From 5dffb29bce51d777c7b41b9f8198100cdabb033b Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Thu, 4 Jul 2024 06:49:04 +0100 Subject: [PATCH 15/84] fix: Emptying cloud_back_schedule "copy_settings" (#2387) * test: add test to reproduce Github Issue * fix: update copy_settings on changes (even when empty) * docs: Add changelog entry * chore: fix changelog entry * apply review comments --- .changelog/2387.txt | 3 + .../resource_cloud_backup_schedule.go | 12 +- .../resource_cloud_backup_schedule_test.go | 112 +++++++++++------- 3 files changed, 74 insertions(+), 53 deletions(-) create mode 100644 .changelog/2387.txt diff --git a/.changelog/2387.txt b/.changelog/2387.txt new file mode 100644 index 0000000000..e56999ff5d --- /dev/null +++ b/.changelog/2387.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_cloud_backup_schedule: Updates `copy_settings` on changes (even when empty) +``` \ No newline at end of file diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index 19a1e70d05..202a3cfb2d 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -492,9 +492,9 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APICli } req := &admin.DiskBackupSnapshotSchedule{} - - if v, ok := d.GetOk("copy_settings"); ok && len(v.([]any)) > 0 { - req.CopySettings = expandCopySettings(v.([]any)) + copySettings := d.Get("copy_settings") + if copySettings != nil && (conversion.HasElementsSliceOrMap(copySettings) || d.HasChange("copy_settings")) { + req.CopySettings = expandCopySettings(copySettings.([]any)) } var policiesItem []admin.DiskBackupApiPolicyItem @@ -642,11 +642,7 @@ func expandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting { } func expandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting { - if 
len(tfList) == 0 { - return nil - } - - var copySettings []admin.DiskBackupCopySetting + copySettings := make([]admin.DiskBackupCopySetting, 0) for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 3d0bb0bf60..8dbabca280 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -254,7 +254,45 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { var ( projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() + checkMap = map[string]string{ + "cluster_name": clusterName, + "reference_hour_of_day": "3", + "reference_minute_of_hour": "45", + "restore_window_days": "1", + "policy_item_hourly.#": "1", + "policy_item_daily.#": "1", + "policy_item_weekly.#": "1", + "policy_item_monthly.#": "1", + "policy_item_yearly.#": "1", + "policy_item_hourly.0.frequency_interval": "1", + "policy_item_hourly.0.retention_unit": "days", + "policy_item_hourly.0.retention_value": "1", + "policy_item_daily.0.frequency_interval": "1", + "policy_item_daily.0.retention_unit": "days", + "policy_item_daily.0.retention_value": "2", + "policy_item_weekly.0.frequency_interval": "4", + "policy_item_weekly.0.retention_unit": "weeks", + "policy_item_weekly.0.retention_value": "3", + "policy_item_monthly.0.frequency_interval": "5", + "policy_item_monthly.0.retention_unit": "months", + "policy_item_monthly.0.retention_value": "4", + "policy_item_yearly.0.frequency_interval": "1", + "policy_item_yearly.0.retention_unit": "years", + "policy_item_yearly.0.retention_value": "1", + } + copySettingsChecks = map[string]string{ + "copy_settings.#": "1", + "copy_settings.0.cloud_provider": "AWS", + "copy_settings.0.region_name": "US_EAST_1", + 
"copy_settings.0.should_copy_oplogs": "true", + } + emptyCopySettingsChecks = map[string]string{ + "copy_settings.#": "0", + } ) + checksDefault := acc.AddAttrChecks(resourceName, []resource.TestCheckFunc{checkExists(resourceName)}, checkMap) + checksCreate := acc.AddAttrChecks(resourceName, checksDefault, copySettingsChecks) + checksUpdate := acc.AddAttrChecks(resourceName, checksDefault, emptyCopySettingsChecks) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, @@ -262,41 +300,20 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(projectID, clusterName, &admin.DiskBackupSnapshotSchedule{ + Config: configCopySettings(projectID, clusterName, false, &admin.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), }), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), - resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), - resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), - resource.TestCheckResourceAttr(resourceName, "restore_window_days", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), - resource.TestCheckResourceAttr(resourceName, 
"policy_item_hourly.0.retention_value", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.0.frequency_interval", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.0.retention_unit", "days"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.0.retention_value", "2"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.0.frequency_interval", "4"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.0.retention_unit", "weeks"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.0.retention_value", "3"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.0.frequency_interval", "5"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.0.retention_unit", "months"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.0.retention_value", "4"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.frequency_interval", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_unit", "years"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_value", "1"), - resource.TestCheckResourceAttr(resourceName, "copy_settings.0.cloud_provider", "AWS"), - resource.TestCheckResourceAttr(resourceName, "copy_settings.0.region_name", "US_EAST_1"), - resource.TestCheckResourceAttr(resourceName, "copy_settings.0.should_copy_oplogs", "true"), - ), + Check: resource.ComposeAggregateTestCheckFunc(checksCreate...), + }, + { + Config: configCopySettings(projectID, clusterName, true, &admin.DiskBackupSnapshotSchedule{ + ReferenceHourOfDay: conversion.Pointer(3), + ReferenceMinuteOfHour: conversion.Pointer(45), + RestoreWindowDays: conversion.Pointer(1), + }), + Check: resource.ComposeAggregateTestCheckFunc(checksUpdate...), }, }, }) @@ -507,7 +524,23 @@ func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) s `, info.ClusterNameStr, info.ProjectIDStr, 
p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(projectID, clusterName string, p *admin.DiskBackupSnapshotSchedule) string { +func configCopySettings(projectID, clusterName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { + var copySettings string + if !emptyCopySettings { + copySettings = ` + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY", + "YEARLY", + "ON_DEMAND"] + region_name = "US_EAST_1" + replication_spec_id = mongodbatlas_cluster.my_cluster.replication_specs.*.id[0] + should_copy_oplogs = true + }` + } return fmt.Sprintf(` resource "mongodbatlas_cluster" "my_cluster" { project_id = %[1]q @@ -564,20 +597,9 @@ func configCopySettings(projectID, clusterName string, p *admin.DiskBackupSnapsh retention_unit = "years" retention_value = 1 } - copy_settings { - cloud_provider = "AWS" - frequencies = ["HOURLY", - "DAILY", - "WEEKLY", - "MONTHLY", - "YEARLY", - "ON_DEMAND"] - region_name = "US_EAST_1" - replication_spec_id = mongodbatlas_cluster.my_cluster.replication_specs.*.id[0] - should_copy_oplogs = true - } + %s } - `, projectID, clusterName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, projectID, clusterName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings) } func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { From 7de7e648071fced797d1bb3218db1daebecd94b6 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Thu, 4 Jul 2024 05:50:56 +0000 Subject: [PATCH 16/84] chore: Updates CHANGELOG.md for #2387 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a84a5bbe2f..590c1da37e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## (Unreleased) +BUG FIXES: + +* resource/mongodbatlas_cloud_backup_schedule: Updates `copy_settings` on 
changes (even when empty) ([#2387](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2387)) + ## 1.17.3 (June 27, 2024) ## 1.17.2 (June 20, 2024) From 2288b94e681bca3ac03c546b6db9fa7dc372ccc0 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:31:03 +0200 Subject: [PATCH 17/84] chore: Updates delete logic for `mongodbatlas_search_deployment` (#2389) * update delete logic * update unit test --- .../searchdeployment/state_transition_search_deployment.go | 4 ++-- .../state_transition_search_deployment_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/searchdeployment/state_transition_search_deployment.go b/internal/service/searchdeployment/state_transition_search_deployment.go index ff0ea37ab1..3ba981c451 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment.go +++ b/internal/service/searchdeployment/state_transition_search_deployment.go @@ -13,7 +13,7 @@ import ( "go.mongodb.org/atlas-sdk/v20240530002/admin" ) -const SearchDeploymentDoesNotExistsError = "ATLAS_FTS_DEPLOYMENT_DOES_NOT_EXIST" +const SearchDeploymentDoesNotExistsError = "ATLAS_SEARCH_DEPLOYMENT_DOES_NOT_EXIST" func WaitSearchNodeStateTransition(ctx context.Context, projectID, clusterName string, client admin.AtlasSearchApi, timeConfig retrystrategy.TimeConfig) (*admin.ApiSearchDeploymentResponse, error) { @@ -56,7 +56,7 @@ func searchDeploymentRefreshFunc(ctx context.Context, projectID, clusterName str return nil, "", err } if err != nil { - if resp.StatusCode == 400 && strings.Contains(err.Error(), SearchDeploymentDoesNotExistsError) { + if resp.StatusCode == 404 && strings.Contains(err.Error(), SearchDeploymentDoesNotExistsError) { return "", retrystrategy.RetryStrategyDeletedState, nil } if resp.StatusCode == 503 { diff --git a/internal/service/searchdeployment/state_transition_search_deployment_test.go 
b/internal/service/searchdeployment/state_transition_search_deployment_test.go index ea9b197a50..21511e0d95 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment_test.go +++ b/internal/service/searchdeployment/state_transition_search_deployment_test.go @@ -20,7 +20,7 @@ var ( updating = "UPDATING" idle = "IDLE" unknown = "" - sc400 = conversion.IntPtr(400) + sc404 = conversion.IntPtr(404) sc500 = conversion.IntPtr(500) sc503 = conversion.IntPtr(503) ) @@ -94,7 +94,7 @@ func TestSearchDeploymentStateTransitionForDelete(t *testing.T) { name: "Regular transition to DELETED", mockResponses: []response{ {state: &updating}, - {statusCode: sc400, err: errors.New(searchdeployment.SearchDeploymentDoesNotExistsError)}, + {statusCode: sc404, err: errors.New(searchdeployment.SearchDeploymentDoesNotExistsError)}, }, expectedError: false, }, From 76ffb6980c8f3ab93d4bfa2b4a50ac4e721699e0 Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Tue, 9 Jul 2024 10:08:36 +0100 Subject: [PATCH 18/84] refactor: use advanced_cluster instead of cluster (#2392) --- .../resource_global_cluster_config_test.go | 48 ++++++++++++++----- internal/testutil/acc/cluster.go | 28 ++++++----- 2 files changed, 52 insertions(+), 24 deletions(-) diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index 342a354f21..3d8149c92c 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -273,41 +273,65 @@ const ( replication_specs { zone_name = "US" num_shards = 1 - regions_config { + region_configs { + auto_scaling { + disk_gb_enabled = false + } region_name = "US_EAST_1" - electable_nodes = 3 + provider_name = "AWS" priority = 7 - read_only_nodes = 0 + electable_specs { + instance_size = "M10" + node_count = 3 + } } } replication_specs { zone_name = "EU" 
num_shards = 1 - regions_config { + region_configs { + auto_scaling { + disk_gb_enabled = false + } region_name = "EU_WEST_1" - electable_nodes = 3 + provider_name = "AWS" priority = 7 - read_only_nodes = 0 + electable_specs { + instance_size = "M10" + node_count = 3 + } } } replication_specs { zone_name = "DE" num_shards = 1 - regions_config { + region_configs { + auto_scaling { + disk_gb_enabled = false + } region_name = "EU_NORTH_1" - electable_nodes = 3 + provider_name = "AWS" priority = 7 - read_only_nodes = 0 + electable_specs { + instance_size = "M10" + node_count = 3 + } } } replication_specs { zone_name = "JP" num_shards = 1 - regions_config { + region_configs { + auto_scaling { + disk_gb_enabled = false + } region_name = "AP_NORTHEAST_1" - electable_nodes = 3 + provider_name = "AWS" priority = 7 - read_only_nodes = 0 + electable_specs { + instance_size = "M10" + node_count = 3 + } } } ` diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index c581f88464..04a6262553 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -61,23 +61,27 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { `, req.ResourceDependencyName) } clusterTerraformStr := fmt.Sprintf(` - resource "mongodbatlas_cluster" "test_cluster" { + resource "mongodbatlas_advanced_cluster" "test_cluster" { project_id = %[1]q name = %[2]q - cloud_backup = %[3]t - auto_scaling_disk_gb_enabled = false - provider_name = %[4]q - provider_instance_size_name = "M10" - - cluster_type = %[5]q + backup_enabled = %[3]t + cluster_type = %[5]q + replication_specs { num_shards = 1 zone_name = "Zone 1" - regions_config { - region_name = "US_WEST_2" - electable_nodes = 3 + region_configs { + auto_scaling { + disk_gb_enabled = false + } + provider_name = %[4]q + region_name = "US_WEST_2" priority = 7 - read_only_nodes = 0 + + electable_specs { + instance_size = "M10" + node_count = 3 + } } } %[6]s @@ -88,7 +92,7 @@ func 
GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { ProjectIDStr: fmt.Sprintf("%q", projectID), ProjectID: projectID, ClusterName: clusterName, - ClusterNameStr: "mongodbatlas_cluster.test_cluster.name", + ClusterNameStr: "mongodbatlas_advanced_cluster.test_cluster.name", ClusterTerraformStr: clusterTerraformStr, } } From 249a523fd9396170d62ecd5c58ed7dae9dc7a444 Mon Sep 17 00:00:00 2001 From: Marco Suma Date: Tue, 9 Jul 2024 16:50:11 +0200 Subject: [PATCH 19/84] fix: Returns error if the analyzers attribute contains unknown fields. (#2394) * fix: Returns error if the analyzers attribute contains unknown fields. * adds changelog file. * Update .changelog/2394.txt Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> --------- Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> --- .changelog/2394.txt | 3 +++ .../searchindex/resource_search_index.go | 10 +++++--- .../searchindex/resource_search_index_test.go | 24 +++++++++++++++++-- 3 files changed, 32 insertions(+), 5 deletions(-) create mode 100644 .changelog/2394.txt diff --git a/.changelog/2394.txt b/.changelog/2394.txt new file mode 100644 index 0000000000..6afb5599ae --- /dev/null +++ b/.changelog/2394.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_search_index: Returns error if the `analyzers` attribute contains unknown fields +``` diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index 72f0d0b783..da544e5b27 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -1,6 +1,7 @@ package searchindex import ( + "bytes" "context" "encoding/json" "errors" @@ -387,8 +388,8 @@ func flattenSearchIndexSynonyms(synonyms []admin.SearchSynonymMappingDefinition) } func marshalSearchIndex(fields any) (string, error) { - bytes, err := json.Marshal(fields) - return string(bytes), err + respBytes, err := 
json.Marshal(fields) + return string(respBytes), err } func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { @@ -566,7 +567,10 @@ func unmarshalSearchIndexAnalyzersFields(str string) ([]admin.AtlasSearchAnalyze if str == "" { return fields, nil } - if err := json.Unmarshal([]byte(str), &fields); err != nil { + dec := json.NewDecoder(bytes.NewReader([]byte(str))) + dec.DisallowUnknownFields() + + if err := dec.Decode(&fields); err != nil { return nil, diag.Errorf("cannot unmarshal search index attribute `analyzers` because it has an incorrect format") } return fields, nil diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go index d0edb9cc84..4600a2cb0a 100644 --- a/internal/service/searchindex/resource_search_index_test.go +++ b/internal/service/searchindex/resource_search_index_test.go @@ -3,6 +3,7 @@ package searchindex_test import ( "context" "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -114,6 +115,10 @@ func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) { Config: configAdditional(projectID, indexName, databaseName, clusterName, ""), Check: checkAdditionalAnalyzers(projectID, indexName, databaseName, clusterName, false), }, + { + Config: configAdditional(projectID, indexName, databaseName, clusterName, incorrectFormatAnalyzersTF), + ExpectError: regexp.MustCompile("cannot unmarshal search index attribute `analyzers` because it has an incorrect format"), + }, }, }) } @@ -437,8 +442,9 @@ const ( with = true without = false - analyzersTF = "\nanalyzers = <<-EOF\n" + analyzersJSON + "\nEOF\n" - mappingsFieldsTF = "\nmappings_fields = <<-EOF\n" + mappingsFieldsJSON + "\nEOF\n" + analyzersTF = "\nanalyzers = <<-EOF\n" + analyzersJSON + "\nEOF\n" + incorrectFormatAnalyzersTF = "\nanalyzers = <<-EOF\n" + incorrectFormatAnalyzersJSON + "\nEOF\n" + mappingsFieldsTF = "\nmappings_fields = 
<<-EOF\n" + mappingsFieldsJSON + "\nEOF\n" analyzersJSON = ` [ @@ -509,4 +515,18 @@ const ( "similarity": "euclidean" }] ` + + incorrectFormatAnalyzersJSON = ` + [ + { + "wrongField":[ + { + "type":"length", + "min":20, + "max":33 + } + ] + } + ] + ` ) From 4377ba1e5db3f7d7ad946f9a1fff24e5c944e0f3 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Tue, 9 Jul 2024 14:52:08 +0000 Subject: [PATCH 20/84] chore: Updates CHANGELOG.md for #2394 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 590c1da37e..579c49f059 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ BUG FIXES: * resource/mongodbatlas_cloud_backup_schedule: Updates `copy_settings` on changes (even when empty) ([#2387](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2387)) +* resource/mongodbatlas_search_index: Returns error if the `analyzers` attribute contains unknown fields ([#2394](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2394)) ## 1.17.3 (June 27, 2024) From b6cdcbf28583624e6c3c247819c4a67515697c97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 09:29:50 +0200 Subject: [PATCH 21/84] chore: Bump github.com/aws/aws-sdk-go from 1.54.13 to 1.54.17 (#2401) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.54.13 to 1.54.17. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.13...v1.54.17) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f43688becb..5b53279f24 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 - github.com/aws/aws-sdk-go v1.54.13 + github.com/aws/aws-sdk-go v1.54.17 github.com/go-test/deep v1.1.1 github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 diff --git a/go.sum b/go.sum index 0926edc4cc..dd001d6520 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.54.13 h1:zpCuiG+/mFdDY/klKJvmSioAZWk45F4rLGq0JWVAAzk= -github.com/aws/aws-sdk-go v1.54.13/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.17 h1:ZV/qwcCIhMHgsJ6iXXPVYI0s1MdLT+5LW28ClzCUPeI= +github.com/aws/aws-sdk-go v1.54.17/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= From 5b551d74b67aa382830d1a5bf951cd8caf781ecf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 09:30:32 +0200 Subject: [PATCH 22/84] chore: Bump 
github.com/hashicorp/terraform-plugin-testing (#2400) Bumps [github.com/hashicorp/terraform-plugin-testing](https://github.com/hashicorp/terraform-plugin-testing) from 1.8.0 to 1.9.0. - [Release notes](https://github.com/hashicorp/terraform-plugin-testing/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-testing/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-testing/compare/v1.8.0...v1.9.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-testing dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 18 +++++++++--------- go.sum | 39 ++++++++++++++++++++------------------- 2 files changed, 29 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 5b53279f24..1a649f10a9 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/hashicorp/terraform-plugin-mux v0.16.0 github.com/hashicorp/terraform-plugin-sdk v1.17.2 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 - github.com/hashicorp/terraform-plugin-testing v1.8.0 + github.com/hashicorp/terraform-plugin-testing v1.9.0 github.com/mongodb-forks/digest v1.1.0 github.com/spf13/cast v1.6.0 github.com/stretchr/testify v1.9.0 @@ -76,7 +76,7 @@ require ( github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect + github.com/hashicorp/hc-install v0.7.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect @@ -121,15 +121,15 @@ require ( go.opentelemetry.io/otel v1.22.0 // indirect go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect - golang.org/x/crypto 
v0.23.0 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/api v0.162.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect diff --git a/go.sum b/go.sum index dd001d6520..9dcc38e501 100644 --- a/go.sum +++ b/go.sum @@ -499,8 +499,8 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -535,8 +535,8 @@ github.com/hashicorp/terraform-plugin-sdk 
v1.17.2/go.mod h1:wkvldbraEMkz23NxkkAs github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= github.com/hashicorp/terraform-plugin-test/v2 v2.2.1/go.mod h1:eZ9JL3O69Cb71Skn6OhHyj17sLmHRb+H6VrDcJjKrYU= -github.com/hashicorp/terraform-plugin-testing v1.8.0 h1:wdYIgwDk4iO933gC4S8KbKdnMQShu6BXuZQPScmHvpk= -github.com/hashicorp/terraform-plugin-testing v1.8.0/go.mod h1:o2kOgf18ADUaZGhtOl0YCkfIxg01MAiMATT2EtIHlZk= +github.com/hashicorp/terraform-plugin-testing v1.9.0 h1:xOsQRqqlHKXpFq6etTxih3ubdK3HVDtfE1IY7Rpd37o= +github.com/hashicorp/terraform-plugin-testing v1.9.0/go.mod h1:fhhVx/8+XNJZTD5o3b4stfZ6+q7z9+lIWigIYdT6/44= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= @@ -829,8 +829,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -874,8 +874,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -944,8 +944,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -990,8 +990,8 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1089,8 +1089,8 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term 
v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1104,8 +1104,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1125,8 +1125,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1194,8 +1194,9 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From 6e8a5cd8aa6967879b8eed383e6bd46a23eb258e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 09:30:59 +0200 Subject: [PATCH 23/84] chore: Bump github.com/hashicorp/terraform-plugin-framework (#2398) Bumps [github.com/hashicorp/terraform-plugin-framework](https://github.com/hashicorp/terraform-plugin-framework) from 1.9.0 to 1.10.0. - [Release notes](https://github.com/hashicorp/terraform-plugin-framework/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-framework/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-framework/compare/v1.9.0...v1.10.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-framework dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1a649f10a9..a599bc0752 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-version v1.7.0 github.com/hashicorp/hcl/v2 v2.21.0 - github.com/hashicorp/terraform-plugin-framework v1.9.0 + github.com/hashicorp/terraform-plugin-framework v1.10.0 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 github.com/hashicorp/terraform-plugin-go v0.23.0 diff --git a/go.sum b/go.sum index 9dcc38e501..af7a354ccb 100644 --- a/go.sum +++ b/go.sum @@ -518,8 +518,8 @@ github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf github.com/hashicorp/terraform-json v0.10.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= -github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc= +github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 
h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= From 4bc8a0781bf407b23d0efd46bb3fe069a3470ebe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 09:31:22 +0200 Subject: [PATCH 24/84] chore: Bump github.com/hashicorp/terraform-plugin-framework-validators (#2399) Bumps [github.com/hashicorp/terraform-plugin-framework-validators](https://github.com/hashicorp/terraform-plugin-framework-validators) from 0.12.0 to 0.13.0. - [Release notes](https://github.com/hashicorp/terraform-plugin-framework-validators/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-framework-validators/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-framework-validators/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-framework-validators dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a599bc0752..eb750c90f4 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/hashicorp/hcl/v2 v2.21.0 github.com/hashicorp/terraform-plugin-framework v1.10.0 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 - github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 github.com/hashicorp/terraform-plugin-go v0.23.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-mux v0.16.0 diff --git a/go.sum b/go.sum index af7a354ccb..2d40220fdb 100644 --- a/go.sum +++ b/go.sum @@ -522,8 +522,8 @@ github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwk github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod 
h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= From ec5424b006be59b5047358154905d090ae7181b5 Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Thu, 11 Jul 2024 07:27:37 +0100 Subject: [PATCH 25/84] test: Uses hclwrite to generate the cluster for GetClusterInfo (#2404) * test: Use hclwrite to generate the cluster for GetClusterInfo * test: fix unit test * refactor: minor improvements * refactor: use Zone 1 as the default ZoneName to make tests pass * refactor: remove num_shards in request and add more tests * fix: use same default region as before * test: Support disk_size_gb for ClusterInfo and add test case for multiple dependencies * refactor: move replication specs to ClusterRequest * test: add support for CloudRegionConfig * add: suggestions from PR comments * refactor: use acc.ReplicationSpecRequest instead of admin.ReplicationSpec --- .../resource_cloud_backup_schedule_test.go | 3 +- .../resource_global_cluster_config_test.go | 73 +---- internal/testutil/acc/cluster.go | 
118 ++++---- internal/testutil/acc/config_formatter.go | 190 ++++++++++++- .../testutil/acc/config_formatter_test.go | 254 ++++++++++++++++++ 5 files changed, 522 insertions(+), 116 deletions(-) diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 8dbabca280..c9dff0becc 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -374,7 +374,8 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true, ProviderName: constant.AZURE}) + spec = acc.ReplicationSpecRequest{ProviderName: constant.AZURE} + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{spec}}) ) resource.ParallelTest(t, resource.TestCase{ diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index 3d8149c92c..68cdc51f5e 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -80,7 +80,11 @@ func TestAccClusterRSGlobalCluster_withAWSAndBackup(t *testing.T) { func TestAccClusterRSGlobalCluster_database(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, ExtraConfig: zonesStr}) + specUS = acc.ReplicationSpecRequest{ZoneName: "US", Region: "US_EAST_1"} + specEU = acc.ReplicationSpecRequest{ZoneName: "EU", Region: "EU_WEST_1"} + specDE = acc.ReplicationSpecRequest{ZoneName: "DE", Region: "EU_NORTH_1"} + specJP = acc.ReplicationSpecRequest{ZoneName: "JP", Region: "AP_NORTHEAST_1"} + 
clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, ReplicationSpecs: []acc.ReplicationSpecRequest{specUS, specEU, specDE, specJP}}) ) resource.Test(t, resource.TestCase{ @@ -268,71 +272,4 @@ const ( zone = "JP" } ` - - zonesStr = ` - replication_specs { - zone_name = "US" - num_shards = 1 - region_configs { - auto_scaling { - disk_gb_enabled = false - } - region_name = "US_EAST_1" - provider_name = "AWS" - priority = 7 - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - replication_specs { - zone_name = "EU" - num_shards = 1 - region_configs { - auto_scaling { - disk_gb_enabled = false - } - region_name = "EU_WEST_1" - provider_name = "AWS" - priority = 7 - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - replication_specs { - zone_name = "DE" - num_shards = 1 - region_configs { - auto_scaling { - disk_gb_enabled = false - } - region_name = "EU_NORTH_1" - provider_name = "AWS" - priority = 7 - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - replication_specs { - zone_name = "JP" - num_shards = 1 - region_configs { - auto_scaling { - disk_gb_enabled = false - } - region_name = "AP_NORTHEAST_1" - provider_name = "AWS" - priority = 7 - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - ` ) diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index 04a6262553..286fee15e3 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -6,12 +6,14 @@ import ( "testing" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type ClusterRequest struct { - ProviderName string - ExtraConfig string ResourceDependencyName string + ClusterNameExplicit string + ReplicationSpecs []ReplicationSpecRequest + DiskSizeGb int CloudBackup bool Geosharded bool } @@ -20,6 +22,7 @@ type ClusterInfo struct { ProjectIDStr string ProjectID string 
ClusterName string + ClusterResourceName string ClusterNameStr string ClusterTerraformStr string } @@ -32,9 +35,6 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { if req == nil { req = new(ClusterRequest) } - if req.ProviderName == "" { - req.ProviderName = constant.AWS - } clusterName := os.Getenv("MONGODB_ATLAS_CLUSTER_NAME") projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") if clusterName != "" && projectID != "" { @@ -47,52 +47,17 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { } } projectID = ProjectIDExecution(tb) - clusterName = RandomClusterName() - clusterTypeStr := "REPLICASET" - if req.Geosharded { - clusterTypeStr = "GEOSHARDED" - } - dependsOnClause := "" - if req.ResourceDependencyName != "" { - dependsOnClause = fmt.Sprintf(` - depends_on = [ - %[1]s - ] - `, req.ResourceDependencyName) + clusterTerraformStr, clusterName, err := ClusterResourceHcl(projectID, req) + if err != nil { + tb.Error(err) } - clusterTerraformStr := fmt.Sprintf(` - resource "mongodbatlas_advanced_cluster" "test_cluster" { - project_id = %[1]q - name = %[2]q - backup_enabled = %[3]t - cluster_type = %[5]q - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - region_configs { - auto_scaling { - disk_gb_enabled = false - } - provider_name = %[4]q - region_name = "US_WEST_2" - priority = 7 - - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - %[6]s - %[7]s - } - `, projectID, clusterName, req.CloudBackup, req.ProviderName, clusterTypeStr, req.ExtraConfig, dependsOnClause) + clusterResourceName := "mongodbatlas_advanced_cluster.cluster_info" return ClusterInfo{ ProjectIDStr: fmt.Sprintf("%q", projectID), ProjectID: projectID, ClusterName: clusterName, - ClusterNameStr: "mongodbatlas_advanced_cluster.test_cluster.name", + ClusterNameStr: fmt.Sprintf("%s.name", clusterResourceName), + ClusterResourceName: clusterResourceName, ClusterTerraformStr: clusterTerraformStr, } } @@ -102,3 +67,64 @@ func 
ExistingClusterUsed() bool { projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") return clusterName != "" && projectID != "" } + +type ReplicationSpecRequest struct { + ZoneName string + Region string + InstanceSize string + ProviderName string + ExtraRegionConfigs []ReplicationSpecRequest + NodeCount int +} + +func (r *ReplicationSpecRequest) AddDefaults() { + if r.NodeCount == 0 { + r.NodeCount = 3 + } + if r.ZoneName == "" { + r.ZoneName = "Zone 1" + } + if r.Region == "" { + r.Region = "US_WEST_2" + } + if r.InstanceSize == "" { + r.InstanceSize = "M10" + } + if r.ProviderName == "" { + r.ProviderName = constant.AWS + } +} + +func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig { + config := CloudRegionConfig(*r) + configs := []admin.CloudRegionConfig{config} + for _, extra := range r.ExtraRegionConfigs { + configs = append(configs, CloudRegionConfig(extra)) + } + return configs +} + +func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { + if req == nil { + req = new(ReplicationSpecRequest) + } + req.AddDefaults() + defaultNumShards := 1 + regionConfigs := req.AllRegionConfigs() + return admin.ReplicationSpec{ + NumShards: &defaultNumShards, + ZoneName: &req.ZoneName, + RegionConfigs: ®ionConfigs, + } +} + +func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { + return admin.CloudRegionConfig{ + RegionName: &req.Region, + ProviderName: &req.ProviderName, + ElectableSpecs: &admin.HardwareSpec{ + InstanceSize: &req.InstanceSize, + NodeCount: &req.NodeCount, + }, + } +} diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index 93b9e40ced..6385fc9182 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -1,9 +1,17 @@ package acc import ( + "encoding/json" "fmt" + "regexp" "sort" "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + 
"github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func FormatToHCLMap(m map[string]string, indent, varName string) string { @@ -41,7 +49,6 @@ func FormatToHCLLifecycleIgnore(keys ...string) string { return strings.Join(lines, "\n") } -// make test deterministic func sortStringMapKeys(m map[string]string) []string { keys := make([]string, 0, len(m)) for k := range m { @@ -50,3 +57,184 @@ func sortStringMapKeys(m map[string]string) []string { sort.Strings(keys) return keys } +func sortStringMapKeysAny(m map[string]any) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)") +var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") + +func ToSnakeCase(str string) string { + snake := matchFirstCap.ReplaceAllString(str, "${1}_${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}") + return strings.ToLower(snake) +} + +func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clusterName string, err error) { + if req == nil { + req = new(ClusterRequest) + } + specRequests := req.ReplicationSpecs + if len(specRequests) == 0 { + specRequests = append(specRequests, ReplicationSpecRequest{}) + } + specs := make([]admin.ReplicationSpec, len(specRequests)) + for i, specRequest := range specRequests { + specs[i] = ReplicationSpec(&specRequest) + } + clusterName = req.ClusterNameExplicit + if clusterName == "" { + clusterName = RandomClusterName() + } + clusterTypeStr := "REPLICASET" + if req.Geosharded { + clusterTypeStr = "GEOSHARDED" + } + + f := hclwrite.NewEmptyFile() + root := f.Body() + cluster := root.AppendNewBlock("resource", []string{"mongodbatlas_advanced_cluster", "cluster_info"}).Body() + clusterRootAttributes := map[string]any{ + "project_id": projectID, + "cluster_type": clusterTypeStr, + "name": clusterName, + 
"backup_enabled": req.CloudBackup, + } + if req.DiskSizeGb != 0 { + clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb + } + addPrimitiveAttributes(cluster, clusterRootAttributes) + cluster.AppendNewline() + for i, spec := range specs { + err = writeReplicationSpec(cluster, spec) + if err != nil { + return "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) + } + } + cluster.AppendNewline() + if req.ResourceDependencyName != "" { + if !strings.Contains(req.ResourceDependencyName, ".") { + return "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'") + } + err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName)) + if err != nil { + return "", "", err + } + } + return "\n" + string(f.Bytes()), clusterName, err +} + +func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error { + replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body() + err := addPrimitiveAttributesViaJSON(replicationBlock, spec) + if err != nil { + return err + } + for _, rc := range spec.GetRegionConfigs() { + if rc.Priority == nil { + rc.SetPriority(7) + } + replicationBlock.AppendNewline() + rcBlock := replicationBlock.AppendNewBlock("region_configs", nil).Body() + err = addPrimitiveAttributesViaJSON(rcBlock, rc) + if err != nil { + return err + } + autoScalingBlock := rcBlock.AppendNewBlock("auto_scaling", nil).Body() + if rc.AutoScaling == nil { + autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false)) + } else { + autoScaling := rc.GetAutoScaling() + return fmt.Errorf("auto_scaling on replication spec is not supportd yet %v", autoScaling) + } + nodeSpec := rc.GetElectableSpecs() + nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body() + err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec) + } + return err +} + +// addPrimitiveAttributesViaJSON adds "primitive" bool/string/int/float attributes of a struct. 
+func addPrimitiveAttributesViaJSON(b *hclwrite.Body, obj any) error { + var objMap map[string]any + inrec, err := json.Marshal(obj) + if err != nil { + return err + } + err = json.Unmarshal(inrec, &objMap) + if err != nil { + return err + } + addPrimitiveAttributes(b, objMap) + return nil +} + +func addPrimitiveAttributes(b *hclwrite.Body, values map[string]any) { + for _, keyCamel := range sortStringMapKeysAny(values) { + key := ToSnakeCase(keyCamel) + value := values[keyCamel] + switch value := value.(type) { + case bool: + b.SetAttributeValue(key, cty.BoolVal(value)) + case string: + if value != "" { + b.SetAttributeValue(key, cty.StringVal(value)) + } + case int: + b.SetAttributeValue(key, cty.NumberIntVal(int64(value))) + // int gets parsed as float64 for json + case float64: + b.SetAttributeValue(key, cty.NumberIntVal(int64(value))) + default: + continue + } + } +} + +// Sometimes it is easier to set a value using hcl/tf syntax instead of creating complex values like list hcl.Traversal. 
+func setAttributeHcl(body *hclwrite.Body, tfExpression string) error { + src := []byte(tfExpression) + + f, diags := hclwrite.ParseConfig(src, "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return fmt.Errorf("extract attribute error %s\nparsing %s", diags, tfExpression) + } + expressionAttributes := f.Body().Attributes() + if len(expressionAttributes) != 1 { + return fmt.Errorf("must be a single attribute in expression: %s", tfExpression) + } + tokens := hclwrite.Tokens{} + for _, attr := range expressionAttributes { + tokens = attr.BuildTokens(tokens) + } + if len(tokens) == 0 { + return fmt.Errorf("no tokens found for expression %s", tfExpression) + } + var attributeName string + valueTokens := []*hclwrite.Token{} + equalFound := false + for _, token := range tokens { + if attributeName == "" && token.Type == hclsyntax.TokenIdent { + attributeName = string(token.Bytes) + } + if equalFound { + valueTokens = append(valueTokens, token) + } + if token.Type == hclsyntax.TokenEqual { + equalFound = true + } + } + if attributeName == "" { + return fmt.Errorf("unable to find the attribute name set for expr=%s", tfExpression) + } + if len(valueTokens) == 0 { + return fmt.Errorf("unable to find the attribute value set for expr=%s", tfExpression) + } + body.SetAttributeRaw(attributeName, valueTokens) + return nil +} diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 16ac5ef7f8..263f22ce9f 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -4,8 +4,10 @@ import ( "fmt" "testing" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func projectTemplateWithExtra(extra string) string { @@ -104,3 +106,255 @@ func TestFormatToHCLLifecycleIgnore(t *testing.T) { }) 
} } + +var standardClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var overrideClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = true + cluster_type = "GEOSHARDED" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone X" + + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "MY_REGION_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M30" + node_count = 30 + } + } + } + +} +` + +var dependsOnClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_project.project_execution] +} +` +var dependsOnMultiResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = 
[mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple] +} +` +var twoReplicationSpecs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + replication_specs { + num_shards = 1 + zone_name = "Zone 2" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var twoRegionConfigs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` + +func Test_ClusterResourceHcl(t *testing.T) { + var ( + clusterName = "my-name" + testCases = map[string]struct { + expected string + req acc.ClusterRequest + }{ + "defaults": { + standardClusterResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName}, + }, + "dependsOn": { + dependsOnClusterResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, + }, + "dependsOnMulti": { + 
dependsOnMultiResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, + }, + "twoReplicationSpecs": { + twoReplicationSpecs, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_1", ZoneName: "Zone 1"}, + {Region: "EU_WEST_2", ZoneName: "Zone 2"}, + }}, + }, + "overrideClusterResource": { + overrideClusterResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName, Geosharded: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE}, + }}, + }, + "twoRegionConfigs": { + twoRegionConfigs, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + { + Region: "US_WEST_1", + InstanceSize: "M10", + NodeCount: 3, + ExtraRegionConfigs: []acc.ReplicationSpecRequest{{Region: "EU_WEST_1", InstanceSize: "M10", NodeCount: 3, ProviderName: constant.AWS}}, + }, + }, + }, + }, + } + ) + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + config, actualClusterName, err := acc.ClusterResourceHcl("project", &tc.req) + require.NoError(t, err) + assert.Equal(t, clusterName, actualClusterName) + assert.Equal(t, tc.expected, config) + }) + } +} From 9f78a9e41e45d86402e634818a53ef989a1bac9b Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 11 Jul 2024 09:37:52 +0200 Subject: [PATCH 26/84] fix: Fixes `disk_iops` attribute for Azure cloud provider in `mongodbatlas_advanced_cluster` resource (#2396) * fix disk_iops in Azure * expand * tests for disk_iops --- .changelog/2396.txt | 3 +++ .../advancedcluster/model_advanced_cluster.go | 8 ++++++-- .../resource_advanced_cluster_test.go | 12 ++++++++++-- 3 files changed, 19 insertions(+), 4 
deletions(-) create mode 100644 .changelog/2396.txt diff --git a/.changelog/2396.txt b/.changelog/2396.txt new file mode 100644 index 0000000000..5bb53f7fda --- /dev/null +++ b/.changelog/2396.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_advanced_cluster: Fixes `disk_iops` attribute for Azure cloud provider +``` diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 0bd011c764..8f26c1312b 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -634,10 +634,12 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHa if len(tfMapObjects) > 0 { tfMapObject := tfMapObjects[0].(map[string]any) - if providerName == "AWS" { + if providerName == constant.AWS || providerName == constant.AZURE { if cast.ToInt64(apiObject.GetDiskIOPS()) > 0 { tfMap["disk_iops"] = apiObject.GetDiskIOPS() } + } + if providerName == constant.AWS { if v, ok := tfMapObject["ebs_volume_type"]; ok && v.(string) != "" { tfMap["ebs_volume_type"] = apiObject.GetEbsVolumeType() } @@ -850,10 +852,12 @@ func expandRegionConfig(tfMap map[string]any) *admin.CloudRegionConfig { func expandRegionConfigSpec(tfList []any, providerName string) *admin.DedicatedHardwareSpec { tfMap, _ := tfList[0].(map[string]any) apiObject := new(admin.DedicatedHardwareSpec) - if providerName == "AWS" { + if providerName == constant.AWS || providerName == constant.AZURE { if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { apiObject.DiskIOPS = conversion.Pointer(v.(int)) } + } + if providerName == constant.AWS { if v, ok := tfMap["ebs_volume_type"]; ok { apiObject.EbsVolumeType = conversion.StringPtr(v.(string)) } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 80823e1735..8f72ce42de 100644 --- 
a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -675,7 +675,9 @@ func checkSingleProvider(projectID, name string) resource.TestCheckFunc { "name": name}, resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), - resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0))) + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0))) } func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string { @@ -830,7 +832,13 @@ func checkMultiCloudSharded(name string) resource.TestCheckFunc { return checkAggr( []string{"project_id", "replication_specs.#", "replication_specs.0.region_configs.#"}, map[string]string{ - "name": name}) + "name": name}, + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.1.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, 
"replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.1.electable_specs.0.disk_iops", acc.IntGreatThan(0))) } func configSingleProviderPaused(projectID, clusterName string, paused bool, instanceSize string) string { From 93733efb9a5eac66900ee740037bba9c71e03fc6 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Thu, 11 Jul 2024 07:39:57 +0000 Subject: [PATCH 27/84] chore: Updates CHANGELOG.md for #2396 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 579c49f059..56116aa948 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ BUG FIXES: +* resource/mongodbatlas_advanced_cluster: Fixes `disk_iops` attribute for Azure cloud provider ([#2396](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2396)) * resource/mongodbatlas_cloud_backup_schedule: Updates `copy_settings` on changes (even when empty) ([#2387](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2387)) * resource/mongodbatlas_search_index: Returns error if the `analyzers` attribute contains unknown fields ([#2394](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2394)) From dd7d62b5171b1236f5843e080c7226fe05fb71a8 Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Fri, 12 Jul 2024 06:47:44 +0100 Subject: [PATCH 28/84] test: Refactors `mongodbatlas_private_endpoint_regional_mode` to use cluster info (#2403) * test: refactor to use cluster info * test: enable test in CI and fix duplicate zone name * test: use AWS_REGION_UPPERCASE and add pre-checks * fix: use clusterResourceName * test: fix GetClusterInfo call * fix: pre check call * fix: add UPPERCASE/LOWERCASE to network test suite * test: Skip in ci since it is slow and use new GetClusterInfo api * test: Fix the broken test and simpify assert statements * test: enable in CI, after refactorings ~1230s --- 
.github/workflows/acceptance-tests-runner.yml | 2 + ...rce_private_endpoint_regional_mode_test.go | 93 +++++++------------ internal/testutil/acc/pre_check.go | 8 ++ 3 files changed, 42 insertions(+), 61 deletions(-) diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 2c570456b0..bfda7bfd11 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -648,6 +648,8 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.aws_access_key_id }} AWS_SECRET_ACCESS_KEY: ${{ secrets.aws_secret_access_key }} AWS_REGION: ${{ vars.AWS_REGION }} + AWS_REGION_UPPERCASE: ${{ vars.AWS_REGION_UPPERCASE }} + AWS_REGION_LOWERCASE: ${{ vars.AWS_REGION_LOWERCASE }} AWS_SECURITY_GROUP_1: ${{ vars.AWS_SECURITY_GROUP_1 }} AWS_SECURITY_GROUP_2: ${{ vars.AWS_SECURITY_GROUP_2 }} AWS_VPC_CIDR_BLOCK: ${{ vars.AWS_VPC_CIDR_BLOCK }} diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go index bc89b6732f..a020aa6aa8 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "strconv" "strings" "testing" @@ -18,31 +17,31 @@ func TestAccPrivateEndpointRegionalMode_basic(t *testing.T) { } func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { - acc.SkipTestForCI(t) // needs AWS configuration - var ( - endpointResourceSuffix = "atlasple" - resourceSuffix = "atlasrm" - resourceName = fmt.Sprintf("mongodbatlas_private_endpoint_regional_mode.%s", resourceSuffix) - awsAccessKey = os.Getenv("AWS_ACCESS_KEY_ID") - awsSecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") - providerName = "AWS" - region = os.Getenv("AWS_REGION") - projectID = acc.ProjectIDExecution(t) - orgID = 
os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName = acc.RandomClusterName() - clusterResourceName = "test" - clusterResource = acc.ConfigClusterGlobal(orgID, projectName, clusterName) - clusterDataSource = modeClusterData(clusterResourceName, resourceSuffix, endpointResourceSuffix) - endpointResources = testConfigUnmanagedAWS( + endpointResourceSuffix = "atlasple" + resourceSuffix = "atlasrm" + resourceName = fmt.Sprintf("mongodbatlas_private_endpoint_regional_mode.%s", resourceSuffix) + awsAccessKey = os.Getenv("AWS_ACCESS_KEY_ID") + awsSecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") + providerName = "AWS" + region = os.Getenv("AWS_REGION_LOWERCASE") + privatelinkEndpointServiceResourceName = fmt.Sprintf("mongodbatlas_privatelink_endpoint_service.%s", endpointResourceSuffix) + spec1 = acc.ReplicationSpecRequest{Region: os.Getenv("AWS_REGION_UPPERCASE"), ProviderName: providerName, ZoneName: "Zone 1"} + spec2 = acc.ReplicationSpecRequest{Region: "US_WEST_2", ProviderName: providerName, ZoneName: "Zone 2"} + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, DiskSizeGb: 80, ReplicationSpecs: []acc.ReplicationSpecRequest{spec1, spec2}}) + projectID = clusterInfo.ProjectID + clusterResourceName = clusterInfo.ClusterResourceName + clusterDataName = "data.mongodbatlas_advanced_cluster.test" + endpointResources = testConfigUnmanagedAWS( awsAccessKey, awsSecretKey, projectID, providerName, region, endpointResourceSuffix, ) - dependencies = []string{clusterResource, clusterDataSource, endpointResources} + clusterDataSource = modeClusterData(clusterResourceName, resourceName, privatelinkEndpointServiceResourceName) + dependencies = []string{clusterInfo.ClusterTerraformStr, clusterDataSource, endpointResources} ) resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.PreCheck(t) }, + PreCheck: func() { acc.PreCheckAwsEnvBasic(t); acc.PreCheckAwsRegionCases(t) }, + ExternalProviders: 
acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -50,9 +49,8 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { Config: configWithDependencies(resourceSuffix, projectID, false, dependencies), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - checkModeClustersUpToDate(projectID, clusterName, clusterResourceName), + resource.TestCheckResourceAttr(clusterDataName, "connection_strings.0.private_endpoint.#", "0"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "enabled"), resource.TestCheckResourceAttr(resourceName, "enabled", "false"), ), }, @@ -60,9 +58,8 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { Config: configWithDependencies(resourceSuffix, projectID, true, dependencies), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - checkModeClustersUpToDate(projectID, clusterName, clusterResourceName), + resource.TestCheckResourceAttr(clusterDataName, "connection_strings.0.private_endpoint.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "enabled"), resource.TestCheckResourceAttr(resourceName, "enabled", "true"), ), }, @@ -113,12 +110,12 @@ func basicTestCase(tb testing.TB) *resource.TestCase { func modeClusterData(clusterResourceName, regionalModeResourceName, privateLinkResourceName string) string { return fmt.Sprintf(` - data "mongodbatlas_cluster" %[1]q { - project_id = mongodbatlas_cluster.%[1]s.project_id - name = mongodbatlas_cluster.%[1]s.name + data "mongodbatlas_advanced_cluster" "test" { + project_id = %[1]s.project_id + name = %[1]s.name depends_on = [ - mongodbatlas_privatelink_endpoint_service.%[3]s, - mongodbatlas_private_endpoint_regional_mode.%[2]s + %[2]s, + %[3]s ] } `, clusterResourceName, regionalModeResourceName, 
privateLinkResourceName) @@ -179,32 +176,6 @@ func checkExists(resourceName string) resource.TestCheckFunc { } } -func checkModeClustersUpToDate(projectID, clusterName, clusterResourceName string) resource.TestCheckFunc { - resourceName := strings.Join([]string{"data", "mongodbatlas_cluster", clusterResourceName}, ".") - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return fmt.Errorf("Could not find resource state for cluster (%s) on project (%s)", clusterName, projectID) - } - var rsPrivateEndpointCount int - var err error - if rsPrivateEndpointCount, err = strconv.Atoi(rs.Primary.Attributes["connection_strings.0.private_endpoint.#"]); err != nil { - return fmt.Errorf("Connection strings private endpoint count is not a number") - } - c, _, _ := acc.Conn().Clusters.Get(context.Background(), projectID, clusterName) - if rsPrivateEndpointCount != len(c.ConnectionStrings.PrivateEndpoint) { - return fmt.Errorf("Cluster PrivateEndpoint count does not match resource") - } - if rs.Primary.Attributes["connection_strings.0.standard"] != c.ConnectionStrings.Standard { - return fmt.Errorf("Cluster standard connection_string does not match resource") - } - if rs.Primary.Attributes["connection_strings.0.standard_srv"] != c.ConnectionStrings.StandardSrv { - return fmt.Errorf("Cluster standard connection_string does not match resource") - } - return nil - } -} - func checkDestroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "mongodbatlas_private_endpoint_regional_mode" { @@ -221,14 +192,14 @@ func checkDestroy(s *terraform.State) error { func testConfigUnmanagedAWS(awsAccessKey, awsSecretKey, projectID, providerName, region, serviceResourceName string) string { return fmt.Sprintf(` provider "aws" { - region = "%[5]s" - access_key = "%[1]s" - secret_key = "%[2]s" + region = %[5]q + access_key = %[1]q + secret_key = %[2]q } resource "mongodbatlas_privatelink_endpoint" "test" { - 
project_id = "%[3]s" - provider_name = "%[4]s" - region = "%[5]s" + project_id = %[3]q + provider_name = %[4]q + region = %[5]q } resource "aws_vpc_endpoint" "ptfe_service" { vpc_id = aws_vpc.primary.id diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 97f91a1d7b..339f092e05 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -180,6 +180,14 @@ func PreCheckAwsEnv(tb testing.TB) { } } +func PreCheckAwsRegionCases(tb testing.TB) { + tb.Helper() + if os.Getenv("AWS_REGION_UPPERCASE") == "" || + os.Getenv("AWS_REGION_LOWERCASE") == "" { + tb.Fatal("`AWS_REGION_UPPERCASE`, `AWS_REGION_LOWERCASE` must be set for acceptance testing") + } +} + func PreCheckAwsEnvPrivateLinkEndpointService(tb testing.TB) { tb.Helper() if os.Getenv("AWS_ACCESS_KEY_ID") == "" || From c5c5bd7e1055921ce74f6079108ba76aa4c550ac Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Fri, 12 Jul 2024 15:22:09 +0100 Subject: [PATCH 29/84] test: Refactors resource tests to use GetClusterInfo `online_archive` (#2409) * feat: adds support for Tags & AutoScalingDiskGbEnabled * feat: refactor tests to use GetClusterInfo & new SDK * chore: fomatting fix * test: make unit test deterministic * test: onlinearchive force us_east_1 * spelling in comment * test: fix migration test to use package clusterRequest (with correct region) --- .../resource_online_archive_migration_test.go | 19 +- .../resource_online_archive_test.go | 230 ++++++++---------- internal/testutil/acc/cluster.go | 17 +- internal/testutil/acc/config_formatter.go | 14 +- .../testutil/acc/config_formatter_test.go | 44 ++++ 5 files changed, 172 insertions(+), 152 deletions(-) diff --git a/internal/service/onlinearchive/resource_online_archive_migration_test.go b/internal/service/onlinearchive/resource_online_archive_migration_test.go index bce4755b2e..f0edb81963 100644 --- a/internal/service/onlinearchive/resource_online_archive_migration_test.go +++ 
b/internal/service/onlinearchive/resource_online_archive_migration_test.go @@ -1,11 +1,8 @@ package onlinearchive_test import ( - "os" "testing" - matlas "go.mongodb.org/atlas/mongodbatlas" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" @@ -14,18 +11,18 @@ import ( func TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { var ( - cluster matlas.Cluster - resourceName = "mongodbatlas_cluster.online_archive_test" onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName = acc.RandomClusterName() + clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterName = clusterInfo.ClusterName + projectID = clusterInfo.ProjectID + clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterResourceName = clusterInfo.ClusterResourceName deleteExpirationDays = 0 ) if mig.IsProviderVersionAtLeast("1.12.2") { deleteExpirationDays = 7 } - config := configWithDailySchedule(orgID, projectName, clusterName, 1, deleteExpirationDays) + config := configWithDailySchedule(clusterTerraformStr, clusterResourceName, 1, deleteExpirationDays) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { mig.PreCheckBasic(t) }, @@ -33,9 +30,9 @@ func TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { Steps: []resource.TestStep{ { ExternalProviders: mig.ExternalProviders(), - Config: configFirstStep(orgID, projectName, clusterName), + Config: clusterTerraformStr, Check: resource.ComposeAggregateTestCheckFunc( - populateWithSampleData(resourceName, &cluster), + populateWithSampleData(clusterResourceName, projectID, clusterName), ), }, { diff --git a/internal/service/onlinearchive/resource_online_archive_test.go b/internal/service/onlinearchive/resource_online_archive_test.go index 4386bb9fdf..fecf5469a0 100644 --- 
a/internal/service/onlinearchive/resource_online_archive_test.go +++ b/internal/service/onlinearchive/resource_online_archive_test.go @@ -4,29 +4,37 @@ import ( "context" "fmt" "log" - "os" "regexp" "testing" "time" - matlas "go.mongodb.org/atlas/mongodbatlas" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" ) +var ( + clusterRequest = acc.ClusterRequest{ + ReplicationSpecs: []acc.ReplicationSpecRequest{ + // Must use US_EAST_1 in dev for online_archive to work + {AutoScalingDiskGbEnabled: true, Region: "US_EAST_1"}, + }, + Tags: map[string]string{ + "ArchiveTest": "true", "Owner": "test", + }, + } +) + func TestAccBackupRSOnlineArchive(t *testing.T) { var ( - cluster matlas.Cluster - resourceName = "mongodbatlas_cluster.online_archive_test" onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" onlineArchivesDataSourceName = "data.mongodbatlas_online_archives.all" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName = acc.RandomClusterName() + clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterName = clusterInfo.ClusterName + projectID = clusterInfo.ProjectID + clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterResourceName = clusterInfo.ClusterResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -35,15 +43,13 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - // We need this step to pupulate the cluster with Sample Data - // The online archive won't work if the cluster does not have data - Config: configFirstStep(orgID, projectName, clusterName), + Config: clusterTerraformStr, Check: 
resource.ComposeAggregateTestCheckFunc( - populateWithSampleData(resourceName, &cluster), + populateWithSampleData(clusterResourceName, projectID, clusterName), ), }, { - Config: configWithDailySchedule(orgID, projectName, clusterName, 1, 7), + Config: configWithDailySchedule(clusterTerraformStr, clusterResourceName, 1, 7), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -59,7 +65,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { ), }, { - Config: configWithDailySchedule(orgID, projectName, clusterName, 2, 8), + Config: configWithDailySchedule(clusterTerraformStr, clusterResourceName, 2, 8), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -75,7 +81,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { ), }, { - Config: testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(orgID, projectName, clusterName, 2), + Config: testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(clusterTerraformStr, clusterResourceName, 2), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -88,7 +94,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { ), }, { - Config: testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(orgID, projectName, clusterName, 2), + Config: testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(clusterTerraformStr, clusterResourceName, 2), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -101,7 +107,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { ), }, { - Config: 
configWithoutSchedule(orgID, projectName, clusterName), + Config: configWithoutSchedule(clusterTerraformStr, clusterResourceName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -110,7 +116,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { ), }, { - Config: configWithoutSchedule(orgID, projectName, clusterName), + Config: configWithoutSchedule(clusterTerraformStr, clusterResourceName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "partition_fields.0.field_name", "last_review"), ), @@ -121,12 +127,12 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { var ( - cluster matlas.Cluster - resourceName = "mongodbatlas_cluster.online_archive_test" + clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterResourceName = clusterInfo.ClusterResourceName + clusterName = clusterInfo.ClusterName + projectID = clusterInfo.ProjectID onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName = acc.RandomClusterName() + clusterTerraformStr = clusterInfo.ClusterTerraformStr ) resource.ParallelTest(t, resource.TestCase{ @@ -135,15 +141,13 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - // We need this step to pupulate the cluster with Sample Data - // The online archive won't work if the cluster does not have data - Config: configFirstStep(orgID, projectName, clusterName), + Config: clusterTerraformStr, Check: resource.ComposeAggregateTestCheckFunc( - populateWithSampleData(resourceName, &cluster), + populateWithSampleData(clusterResourceName, projectID, clusterName), ), }, { - Config: configWithoutSchedule(orgID, 
projectName, clusterName), + Config: configWithoutSchedule(clusterTerraformStr, clusterResourceName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -151,7 +155,7 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { ), }, { - Config: configWithDailySchedule(orgID, projectName, clusterName, 1, 1), + Config: configWithDailySchedule(clusterTerraformStr, clusterResourceName, 1, 1), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "state"), resource.TestCheckResourceAttrSet(onlineArchiveResourceName, "archive_id"), @@ -169,13 +173,13 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { var ( - cluster matlas.Cluster - resourceName = "mongodbatlas_cluster.online_archive_test" onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName = acc.RandomClusterName() + clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterResourceName = clusterInfo.ClusterResourceName + clusterName = clusterInfo.ClusterName + projectID = clusterInfo.ProjectID + clusterTerraformStr = clusterInfo.ClusterTerraformStr cloudProvider = "AWS" processRegion = "US_EAST_1" ) @@ -186,15 +190,13 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - // We need this step to pupulate the cluster with Sample Data - // The online archive won't work if the cluster does not have data - Config: configFirstStep(orgID, projectName, clusterName), + Config: clusterTerraformStr, Check: resource.ComposeAggregateTestCheckFunc( - 
populateWithSampleData(resourceName, &cluster), + populateWithSampleData(clusterResourceName, projectID, clusterName), ), }, { - Config: configWithDataProcessRegion(orgID, projectName, clusterName, cloudProvider, processRegion), + Config: configWithDataProcessRegion(clusterTerraformStr, clusterResourceName, cloudProvider, processRegion), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.cloud_provider", cloudProvider), resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.region", processRegion), @@ -203,11 +205,11 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { ), }, { - Config: configWithDataProcessRegion(orgID, projectName, clusterName, cloudProvider, "AP_SOUTH_1"), + Config: configWithDataProcessRegion(clusterTerraformStr, clusterResourceName, cloudProvider, "AP_SOUTH_1"), ExpectError: regexp.MustCompile("data_process_region can't be modified"), }, { - Config: configWithoutSchedule(orgID, projectName, clusterName), + Config: configWithoutSchedule(clusterTerraformStr, clusterResourceName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.cloud_provider", cloudProvider), resource.TestCheckResourceAttr(onlineArchiveResourceName, "data_process_region.0.region", processRegion), @@ -219,10 +221,10 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { func TestAccBackupRSOnlineArchiveInvalidProcessRegion(t *testing.T) { var ( - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName = acc.RandomClusterName() - cloudProvider = "AWS" + clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterTerraformStr = clusterInfo.ClusterTerraformStr + cloudProvider = "AWS" + clusterResourceName = clusterInfo.ClusterResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -231,14 +233,15 @@ func 
TestAccBackupRSOnlineArchiveInvalidProcessRegion(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configWithDataProcessRegion(orgID, projectName, clusterName, cloudProvider, "UNKNOWN"), + Config: configWithDataProcessRegion(clusterTerraformStr, clusterResourceName, cloudProvider, "UNKNOWN"), ExpectError: regexp.MustCompile("INVALID_ATTRIBUTE"), }, }, }) } -func populateWithSampleData(resourceName string, cluster *matlas.Cluster) resource.TestCheckFunc { +// populateWithSampleData adds Sample Data to the cluster otherwise online archive won't work +func populateWithSampleData(resourceName, projectID, clusterName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -247,18 +250,18 @@ func populateWithSampleData(resourceName string, cluster *matlas.Cluster) resour if rs.Primary.ID == "" { return fmt.Errorf("no ID is set") } - ids := conversion.DecodeStateID(rs.Primary.ID) - log.Printf("[DEBUG] projectID: %s, name %s", ids["project_id"], ids["cluster_name"]) - clusterResp, _, err := acc.Conn().Clusters.Get(context.Background(), ids["project_id"], ids["cluster_name"]) + conn := acc.ConnV2() + ctx := context.Background() + _, _, err := conn.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { - return fmt.Errorf("cluster(%s:%s) does not exist %s", rs.Primary.Attributes["project_id"], rs.Primary.ID, err) + return fmt.Errorf("cluster(%s:%s) does not exist %s", projectID, clusterName, err) } - *cluster = *clusterResp - - job, _, err := acc.Conn().Clusters.LoadSampleDataset(context.Background(), ids["project_id"], ids["cluster_name"]) - + job, _, err := conn.ClustersApi.LoadSampleDataset(context.Background(), projectID, clusterName).Execute() if err != nil { - return fmt.Errorf("cluster(%s:%s) loading sample data set error %s", rs.Primary.Attributes["project_id"], rs.Primary.ID, err) + return fmt.Errorf("cluster(%s:%s) 
loading sample data set error %s", projectID, clusterName, err) + } + if job == nil { + return fmt.Errorf("cluster(%s:%s) loading sample data set error, no job found", projectID, clusterName) } ticker := time.NewTicker(30 * time.Second) @@ -268,26 +271,28 @@ func populateWithSampleData(resourceName string, cluster *matlas.Cluster) resour case <-time.After(20 * time.Second): log.Println("timeout elapsed ....") case <-ticker.C: - job, _, err = acc.Conn().Clusters.GetSampleDatasetStatus(context.Background(), ids["project_id"], job.ID) + job, _, err = conn.ClustersApi.GetSampleDatasetLoadStatus(ctx, projectID, job.GetId()).Execute() fmt.Println("querying for job ") - if job.State != "WORKING" { + if err != nil { + return fmt.Errorf("cluster(%s:%s) failed to query for job, %s", projectID, clusterName, err) + } + if job == nil { + return fmt.Errorf("cluster(%s:%s) failed to query for job, no job found", projectID, clusterName) + } + if job.GetState() != "WORKING" { break JOB } } } - if err != nil { - return fmt.Errorf("cluster(%s:%s) loading sample data set error %s", rs.Primary.Attributes["project_id"], rs.Primary.ID, err) - } - - if job.State != "COMPLETED" { - return fmt.Errorf("cluster(%s:%s) working sample data set error %s", rs.Primary.Attributes["project_id"], job.ID, job.State) + if job.GetState() != "COMPLETED" { + return fmt.Errorf("cluster(%s:%s) working sample data set error %s", projectID, job.GetId(), job.GetState()) } return nil } } -func configWithDailySchedule(orgID, projectName, clusterName string, startHour, deleteExpirationDays int) string { +func configWithDailySchedule(clusterTerraformStr, clusterResourceName string, startHour, deleteExpirationDays int) string { var dataExpirationRuleBlock string if deleteExpirationDays > 0 { dataExpirationRuleBlock = fmt.Sprintf(` @@ -300,8 +305,8 @@ func configWithDailySchedule(orgID, projectName, clusterName string, startHour, return fmt.Sprintf(` %[1]s resource "mongodbatlas_online_archive" "users_archive" { - 
project_id = mongodbatlas_cluster.online_archive_test.project_id - cluster_name = mongodbatlas_cluster.online_archive_test.name + project_id = %[4]s.project_id + cluster_name = %[4]s.name coll_name = "listingsAndReviews" collection_type = "STANDARD" db_name = "sample_airbnb" @@ -351,15 +356,15 @@ func configWithDailySchedule(orgID, projectName, clusterName string, startHour, project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name } - `, configFirstStep(orgID, projectName, clusterName), startHour, dataExpirationRuleBlock) + `, clusterTerraformStr, startHour, dataExpirationRuleBlock, clusterResourceName) } -func configWithoutSchedule(orgID, projectName, clusterName string) string { +func configWithoutSchedule(clusterTerraformStr, clusterResourceName string) string { return fmt.Sprintf(` - %s + %[1]s resource "mongodbatlas_online_archive" "users_archive" { - project_id = mongodbatlas_cluster.online_archive_test.project_id - cluster_name = mongodbatlas_cluster.online_archive_test.name + project_id = %[2]s.project_id + cluster_name = %[2]s.name coll_name = "listingsAndReviews" collection_type = "STANDARD" db_name = "sample_airbnb" @@ -399,15 +404,15 @@ func configWithoutSchedule(orgID, projectName, clusterName string) string { project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name } - `, configFirstStep(orgID, projectName, clusterName)) + `, clusterTerraformStr, clusterResourceName) } -func configWithDataProcessRegion(orgID, projectName, clusterName, cloudProvider, region string) string { +func configWithDataProcessRegion(clusterTerraformStr, clusterResourceName, cloudProvider, region string) string { return fmt.Sprintf(` - %s + %[1]s resource "mongodbatlas_online_archive" "users_archive" { - project_id = mongodbatlas_cluster.online_archive_test.project_id - cluster_name = 
mongodbatlas_cluster.online_archive_test.name + project_id = %[4]s.project_id + cluster_name = %[4]s.name coll_name = "listingsAndReviews" collection_type = "STANDARD" db_name = "sample_airbnb" @@ -452,58 +457,15 @@ func configWithDataProcessRegion(orgID, projectName, clusterName, cloudProvider, project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name } - `, configFirstStep(orgID, projectName, clusterName), cloudProvider, region) -} - -func configFirstStep(orgID, projectName, clusterName string) string { - return fmt.Sprintf(` - resource "mongodbatlas_project" "cluster_project" { - name = %[2]q - org_id = %[1]q - } - resource "mongodbatlas_cluster" "online_archive_test" { - project_id = mongodbatlas_project.cluster_project.id - name = %[3]q - disk_size_gb = 10 - - cluster_type = "REPLICASET" - replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - - cloud_backup = false - auto_scaling_disk_gb_enabled = true - - // Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M10" - - labels { - key = "ArchiveTest" - value = "true" - } - labels { - key = "Owner" - value = "test" - } - } - - - `, orgID, projectName, clusterName) + `, clusterTerraformStr, cloudProvider, region, clusterResourceName) } -func testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(orgID, projectName, clusterName string, startHour int) string { +func testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(clusterTerraformStr, clusterResourceName string, startHour int) string { return fmt.Sprintf(` - %s + %[1]s resource "mongodbatlas_online_archive" "users_archive" { - project_id = mongodbatlas_cluster.online_archive_test.project_id - cluster_name = mongodbatlas_cluster.online_archive_test.name + project_id = %[3]s.project_id + cluster_name = %[3]s.name coll_name = "listingsAndReviews" collection_type 
= "STANDARD" db_name = "sample_airbnb" @@ -520,7 +482,7 @@ func testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(orgID, projectName, cl day_of_week = 1 end_hour = 1 end_minute = 1 - start_hour = %d + start_hour = %[2]d start_minute = 1 } @@ -552,15 +514,15 @@ func testAccBackupRSOnlineArchiveConfigWithWeeklySchedule(orgID, projectName, cl project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name } - `, configFirstStep(orgID, projectName, clusterName), startHour) + `, clusterTerraformStr, startHour, clusterResourceName) } -func testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(orgID, projectName, clusterName string, startHour int) string { +func testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(clusterTerraformStr, clusterResourceName string, startHour int) string { return fmt.Sprintf(` - %s + %[1]s resource "mongodbatlas_online_archive" "users_archive" { - project_id = mongodbatlas_cluster.online_archive_test.project_id - cluster_name = mongodbatlas_cluster.online_archive_test.name + project_id = %[3]s.project_id + cluster_name = %[3]s.name coll_name = "listingsAndReviews" collection_type = "STANDARD" db_name = "sample_airbnb" @@ -577,7 +539,7 @@ func testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(orgID, projectName, c day_of_month = 1 end_hour = 1 end_minute = 1 - start_hour = %d + start_hour = %[2]d start_minute = 1 } @@ -611,5 +573,5 @@ func testAccBackupRSOnlineArchiveConfigWithMonthlySchedule(orgID, projectName, c project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name } - `, configFirstStep(orgID, projectName, clusterName), startHour) + `, clusterTerraformStr, startHour, clusterResourceName) } diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index 286fee15e3..d895a5b160 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go 
@@ -10,6 +10,7 @@ import ( ) type ClusterRequest struct { + Tags map[string]string ResourceDependencyName string ClusterNameExplicit string ReplicationSpecs []ReplicationSpecRequest @@ -69,12 +70,13 @@ func ExistingClusterUsed() bool { } type ReplicationSpecRequest struct { - ZoneName string - Region string - InstanceSize string - ProviderName string - ExtraRegionConfigs []ReplicationSpecRequest - NodeCount int + ZoneName string + Region string + InstanceSize string + ProviderName string + ExtraRegionConfigs []ReplicationSpecRequest + NodeCount int + AutoScalingDiskGbEnabled bool } func (r *ReplicationSpecRequest) AddDefaults() { @@ -126,5 +128,8 @@ func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { InstanceSize: &req.InstanceSize, NodeCount: &req.NodeCount, }, + AutoScaling: &admin.AdvancedAutoScalingSettings{ + DiskGB: &admin.DiskGBAutoScaling{Enabled: &req.AutoScalingDiskGbEnabled}, + }, } } diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index 6385fc9182..aab67c7835 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -116,6 +116,14 @@ func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clust return "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) } } + if len(req.Tags) > 0 { + for _, key := range sortStringMapKeys(req.Tags) { + value := req.Tags[key] + tagBlock := cluster.AppendNewBlock("tags", nil).Body() + tagBlock.SetAttributeValue("key", cty.StringVal(key)) + tagBlock.SetAttributeValue("value", cty.StringVal(value)) + } + } cluster.AppendNewline() if req.ResourceDependencyName != "" { if !strings.Contains(req.ResourceDependencyName, ".") { @@ -150,7 +158,11 @@ func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) er autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false)) } else { autoScaling := rc.GetAutoScaling() - return 
fmt.Errorf("auto_scaling on replication spec is not supportd yet %v", autoScaling) + asDisk := autoScaling.GetDiskGB() + autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(asDisk.GetEnabled())) + if autoScaling.Compute != nil { + return fmt.Errorf("auto_scaling.compute is not supportd yet %v", autoScaling) + } } nodeSpec := rc.GetElectableSpecs() nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body() diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 263f22ce9f..4d80dceb8e 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -303,6 +303,42 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { } ` +var autoScalingDiskEnabled = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = true + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + tags { + key = "ArchiveTest" + value = "true" + } + tags { + key = "Owner" + value = "test" + } + +} +` + func Test_ClusterResourceHcl(t *testing.T) { var ( clusterName = "my-name" @@ -347,6 +383,14 @@ func Test_ClusterResourceHcl(t *testing.T) { }, }, }, + "autoScalingDiskEnabled": { + autoScalingDiskEnabled, + acc.ClusterRequest{ClusterNameExplicit: clusterName, Tags: map[string]string{ + "ArchiveTest": "true", "Owner": "test", + }, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {AutoScalingDiskGbEnabled: true}, + }}, + }, } ) for name, tc := range testCases { From c2b384bda25d9a6857acd19925d3aa8a07fe4600 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Fri, 12 Jul 2024 16:59:05 +0200 Subject: [PATCH 30/84] 
update .tool-versions (#2417) --- .tool-versions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.tool-versions b/.tool-versions index 4277997d39..c23da88ab6 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -golang 1.22.4 -terraform 1.9.0 +golang 1.22.5 +terraform 1.9.2 From 749292ce3361d2fa0bd268a3b0321c40f77c8d0d Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Sun, 14 Jul 2024 16:04:04 +0200 Subject: [PATCH 31/84] feat: Adds `stored_source` attribute to `mongodbatlas_search_index` resource and corresponding data sources (#2388) * fix ds schemas * add changelog * add storedSource to configBasic and checkBasic * update doc about index_id * update boolean test * first implementation of stored_source as string * create model file * marshal * don't allow update * test for objects in stored_source * TestAccSearchIndex_withStoredSourceUpdate * update StoredSource * fix merge * tests for storedSource updates * swap test names * doc --- .changelog/2388.txt | 11 ++ .../searchindex/data_source_search_index.go | 42 +++-- .../searchindex/data_source_search_indexes.go | 10 +- .../service/searchindex/model_search_index.go | 151 ++++++++++++++++ .../searchindex/resource_search_index.go | 168 ++++-------------- .../resource_search_index_migration_test.go | 1 + .../searchindex/resource_search_index_test.go | 146 ++++++++++++--- website/docs/d/search_index.html.markdown | 4 +- website/docs/d/search_indexes.html.markdown | 5 +- website/docs/r/search_index.html.markdown | 61 ++++--- 10 files changed, 388 insertions(+), 211 deletions(-) create mode 100644 .changelog/2388.txt create mode 100644 internal/service/searchindex/model_search_index.go diff --git a/.changelog/2388.txt b/.changelog/2388.txt new file mode 100644 index 0000000000..14807c8714 --- /dev/null +++ b/.changelog/2388.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/mongodbatlas_search_index: Adds attribute `stored_source` +``` + 
+```release-note:enhancement +data-source/mongodbatlas_search_index: Adds attribute `stored_source` +``` + +```release-note:enhancement +data-source/mongodbatlas_search_indexes: Adds attribute `stored_source` +``` diff --git a/internal/service/searchindex/data_source_search_index.go b/internal/service/searchindex/data_source_search_index.go index 3283ff1c8c..495e7033e6 100644 --- a/internal/service/searchindex/data_source_search_index.go +++ b/internal/service/searchindex/data_source_search_index.go @@ -32,37 +32,35 @@ func returnSearchIndexDSSchema() map[string]*schema.Schema { }, "analyzer": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "analyzers": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: validateSearchAnalyzersDiff, + Type: schema.TypeString, + Computed: true, }, "collection_name": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "database": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "name": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "search_analyzer": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "mappings_dynamic": { Type: schema.TypeBool, - Optional: true, + Computed: true, }, "mappings_fields": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: validateSearchIndexMappingDiff, + Type: schema.TypeString, + Computed: true, }, "synonyms": { Type: schema.TypeSet, @@ -90,12 +88,15 @@ func returnSearchIndexDSSchema() map[string]*schema.Schema { }, "type": { Type: schema.TypeString, - Optional: true, + Computed: true, }, "fields": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: validateSearchIndexMappingDiff, + Type: schema.TypeString, + Computed: true, + }, + "stored_source": { + Type: schema.TypeString, + Computed: true, }, } } @@ -185,6 +186,15 @@ func dataSourceMongoDBAtlasSearchIndexRead(ctx context.Context, d *schema.Resour } } + storedSource := searchIndex.LatestDefinition.GetStoredSource() + 
strStoredSource, errStoredSource := MarshalStoredSource(storedSource) + if errStoredSource != nil { + return diag.FromErr(errStoredSource) + } + if err := d.Set("stored_source", strStoredSource); err != nil { + return diag.Errorf("error setting `stored_source` for search index (%s): %s", d.Id(), err) + } + d.SetId(conversion.EncodeStateID(map[string]string{ "project_id": projectID.(string), "cluster_name": clusterName.(string), diff --git a/internal/service/searchindex/data_source_search_indexes.go b/internal/service/searchindex/data_source_search_indexes.go index 63a272af64..3cfd89f617 100644 --- a/internal/service/searchindex/data_source_search_indexes.go +++ b/internal/service/searchindex/data_source_search_indexes.go @@ -35,7 +35,7 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ - Schema: returnSearchIndexSchema(), + Schema: returnSearchIndexDSSchema(), }, }, "total_count": { @@ -131,7 +131,13 @@ func flattenSearchIndexes(searchIndexes []admin.SearchIndexResponse, projectID, } searchIndexesMap[i]["fields"] = fieldsMarshaled } - } + storedSource := searchIndexes[i].LatestDefinition.GetStoredSource() + strStoredSource, errStoredSource := MarshalStoredSource(storedSource) + if errStoredSource != nil { + return nil, errStoredSource + } + searchIndexesMap[i]["stored_source"] = strStoredSource + } return searchIndexesMap, nil } diff --git a/internal/service/searchindex/model_search_index.go b/internal/service/searchindex/model_search_index.go new file mode 100644 index 0000000000..4fcb07b7a8 --- /dev/null +++ b/internal/service/searchindex/model_search_index.go @@ -0,0 +1,151 @@ +package searchindex + +import ( + "bytes" + "context" + "encoding/json" + "log" + "strconv" + + "github.com/go-test/deep" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "go.mongodb.org/atlas-sdk/v20240530002/admin" +) + +func flattenSearchIndexSynonyms(synonyms []admin.SearchSynonymMappingDefinition) []map[string]any { + synonymsMap := make([]map[string]any, len(synonyms)) + for i, s := range synonyms { + synonymsMap[i] = map[string]any{ + "name": s.Name, + "analyzer": s.Analyzer, + "source_collection": s.Source.Collection, + } + } + return synonymsMap +} + +func expandSearchIndexSynonyms(d *schema.ResourceData) []admin.SearchSynonymMappingDefinition { + var synonymsList []admin.SearchSynonymMappingDefinition + if vSynonyms, ok := d.GetOk("synonyms"); ok { + for _, s := range vSynonyms.(*schema.Set).List() { + synonym := s.(map[string]any) + synonymsDoc := admin.SearchSynonymMappingDefinition{ + Name: synonym["name"].(string), + Analyzer: synonym["analyzer"].(string), + Source: admin.SynonymSource{ + Collection: synonym["source_collection"].(string), + }, + } + synonymsList = append(synonymsList, synonymsDoc) + } + } + return synonymsList +} + +func marshalSearchIndex(fields any) (string, error) { + respBytes, err := json.Marshal(fields) + return string(respBytes), err +} + +func unmarshalSearchIndexMappingFields(str string) (map[string]any, diag.Diagnostics) { + fields := map[string]any{} + if str == "" { + return fields, nil + } + if err := json.Unmarshal([]byte(str), &fields); err != nil { + return nil, diag.Errorf("cannot unmarshal search index attribute `mappings_fields` because it has an incorrect format") + } + return fields, nil +} + +func unmarshalSearchIndexFields(str string) ([]map[string]any, diag.Diagnostics) { + fields := []map[string]any{} + if str == "" { + return fields, nil + } + if err := json.Unmarshal([]byte(str), &fields); err != nil { + return nil, diag.Errorf("cannot unmarshal search index attribute `fields` because it has an incorrect format") + } + + return fields, nil +} + +func unmarshalSearchIndexAnalyzersFields(str 
string) ([]admin.AtlasSearchAnalyzer, diag.Diagnostics) { + fields := []admin.AtlasSearchAnalyzer{} + if str == "" { + return fields, nil + } + dec := json.NewDecoder(bytes.NewReader([]byte(str))) + dec.DisallowUnknownFields() + if err := dec.Decode(&fields); err != nil { + return nil, diag.Errorf("cannot unmarshal search index attribute `analyzers` because it has an incorrect format") + } + return fields, nil +} + +func MarshalStoredSource(obj any) (string, error) { + if obj == nil { + return "", nil + } + if b, ok := obj.(bool); ok { + return strconv.FormatBool(b), nil + } + respBytes, err := json.Marshal(obj) + return string(respBytes), err +} + +func UnmarshalStoredSource(str string) (any, diag.Diagnostics) { + switch str { + case "": + return any(nil), nil + case "true": + return true, nil + case "false": + return false, nil + default: + var obj any + if err := json.Unmarshal([]byte(str), &obj); err != nil { + return nil, diag.Errorf("cannot unmarshal search index attribute `stored_source` because it has an incorrect format") + } + return obj, nil + } +} + +func diffSuppressJSON(k, old, newStr string, d *schema.ResourceData) bool { + var j, j2 any + + if old == "" { + old = "{}" + } + + if newStr == "" { + newStr = "{}" + } + + if err := json.Unmarshal([]byte(old), &j); err != nil { + log.Printf("[ERROR] cannot unmarshal old search index analyzer json %v", err) + } + if err := json.Unmarshal([]byte(newStr), &j2); err != nil { + log.Printf("[ERROR] cannot unmarshal new search index analyzer json %v", err) + } + if diff := deep.Equal(&j, &j2); diff != nil { + log.Printf("[DEBUG] deep equal not passed: %v", diff) + return false + } + + return true +} + +func resourceSearchIndexRefreshFunc(ctx context.Context, clusterName, projectID, indexID string, connV2 *admin.APIClient) retry.StateRefreshFunc { + return func() (any, string, error) { + searchIndex, _, err := connV2.AtlasSearchApi.GetAtlasSearchIndex(ctx, projectID, clusterName, indexID).Execute() + if err != 
nil { + return nil, "ERROR", err + } + status := conversion.SafeString(searchIndex.Status) + return searchIndex, status, nil + } +} diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index da544e5b27..0139101588 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -1,16 +1,13 @@ package searchindex import ( - "bytes" "context" - "encoding/json" "errors" "fmt" "log" "strings" "time" - "github.com/go-test/deep" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -64,7 +61,7 @@ func returnSearchIndexSchema() map[string]*schema.Schema { "analyzers": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: validateSearchAnalyzersDiff, + DiffSuppressFunc: diffSuppressJSON, }, "collection_name": { Type: schema.TypeString, @@ -89,7 +86,7 @@ func returnSearchIndexSchema() map[string]*schema.Schema { "mappings_fields": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: validateSearchIndexMappingDiff, + DiffSuppressFunc: diffSuppressJSON, }, "synonyms": { Type: schema.TypeSet, @@ -126,7 +123,12 @@ func returnSearchIndexSchema() map[string]*schema.Schema { "fields": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: validateSearchIndexMappingDiff, + DiffSuppressFunc: diffSuppressJSON, + }, + "stored_source": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: diffSuppressJSON, }, } } @@ -258,6 +260,14 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
searchIndex.Definition.Synonyms = &synonyms } + if d.HasChange("stored_source") { + obj, err := UnmarshalStoredSource(d.Get("stored_source").(string)) + if err != nil { + return err + } + searchIndex.Definition.StoredSource = obj + } + if _, _, err := connV2.AtlasSearchApi.UpdateAtlasSearchIndex(ctx, projectID, clusterName, indexID, searchIndex).Execute(); err != nil { return diag.Errorf("error updating search index (%s): %s", indexName, err) } @@ -372,24 +382,16 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } } - return nil -} - -func flattenSearchIndexSynonyms(synonyms []admin.SearchSynonymMappingDefinition) []map[string]any { - synonymsMap := make([]map[string]any, len(synonyms)) - for i, s := range synonyms { - synonymsMap[i] = map[string]any{ - "name": s.Name, - "analyzer": s.Analyzer, - "source_collection": s.Source.Collection, - } + storedSource := searchIndex.LatestDefinition.GetStoredSource() + strStoredSource, errStoredSource := MarshalStoredSource(storedSource) + if errStoredSource != nil { + return diag.FromErr(errStoredSource) + } + if err := d.Set("stored_source", strStoredSource); err != nil { + return diag.Errorf("error setting `stored_source` for search index (%s): %s", d.Id(), err) } - return synonymsMap -} -func marshalSearchIndex(fields any) (string, error) { - respBytes, err := json.Marshal(fields) - return string(respBytes), err + return nil } func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { @@ -433,6 +435,12 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
searchIndexRequest.Definition.Synonyms = &synonyms } + objStoredSource, errStoredSource := UnmarshalStoredSource(d.Get("stored_source").(string)) + if errStoredSource != nil { + return errStoredSource + } + searchIndexRequest.Definition.StoredSource = objStoredSource + dbSearchIndexRes, _, err := connV2.AtlasSearchApi.CreateAtlasSearchIndex(ctx, projectID, clusterName, searchIndexRequest).Execute() if err != nil { return diag.Errorf("error creating index: %s", err) @@ -470,119 +478,3 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. return resourceRead(ctx, d, meta) } - -func expandSearchIndexSynonyms(d *schema.ResourceData) []admin.SearchSynonymMappingDefinition { - var synonymsList []admin.SearchSynonymMappingDefinition - if vSynonyms, ok := d.GetOk("synonyms"); ok { - for _, s := range vSynonyms.(*schema.Set).List() { - synonym := s.(map[string]any) - synonymsDoc := admin.SearchSynonymMappingDefinition{ - Name: synonym["name"].(string), - Analyzer: synonym["analyzer"].(string), - Source: admin.SynonymSource{ - Collection: synonym["source_collection"].(string), - }, - } - synonymsList = append(synonymsList, synonymsDoc) - } - } - return synonymsList -} - -func validateSearchIndexMappingDiff(k, old, newStr string, d *schema.ResourceData) bool { - var j, j2 any - - if old == "" { - old = "{}" - } - - if newStr == "" { - newStr = "{}" - } - - if err := json.Unmarshal([]byte(old), &j); err != nil { - log.Printf("[ERROR] cannot unmarshal old search index mapping json %v", err) - } - if err := json.Unmarshal([]byte(newStr), &j2); err != nil { - log.Printf("[ERROR] cannot unmarshal new search index mapping json %v", err) - } - if diff := deep.Equal(&j, &j2); diff != nil { - log.Printf("[DEBUG] deep equal not passed: %v", diff) - return false - } - - return true -} - -func validateSearchAnalyzersDiff(k, old, newStr string, d *schema.ResourceData) bool { - var j, j2 any - - if old == "" { - old = "{}" - } - - if newStr == "" { - newStr = 
"{}" - } - - if err := json.Unmarshal([]byte(old), &j); err != nil { - log.Printf("[ERROR] cannot unmarshal old search index analyzer json %v", err) - } - if err := json.Unmarshal([]byte(newStr), &j2); err != nil { - log.Printf("[ERROR] cannot unmarshal new search index analyzer json %v", err) - } - if diff := deep.Equal(&j, &j2); diff != nil { - log.Printf("[DEBUG] deep equal not passed: %v", diff) - return false - } - - return true -} - -func unmarshalSearchIndexMappingFields(str string) (map[string]any, diag.Diagnostics) { - fields := map[string]any{} - if str == "" { - return fields, nil - } - if err := json.Unmarshal([]byte(str), &fields); err != nil { - return nil, diag.Errorf("cannot unmarshal search index attribute `mappings_fields` because it has an incorrect format") - } - return fields, nil -} - -func unmarshalSearchIndexFields(str string) ([]map[string]any, diag.Diagnostics) { - fields := []map[string]any{} - if str == "" { - return fields, nil - } - if err := json.Unmarshal([]byte(str), &fields); err != nil { - return nil, diag.Errorf("cannot unmarshal search index attribute `fields` because it has an incorrect format") - } - - return fields, nil -} - -func unmarshalSearchIndexAnalyzersFields(str string) ([]admin.AtlasSearchAnalyzer, diag.Diagnostics) { - fields := []admin.AtlasSearchAnalyzer{} - if str == "" { - return fields, nil - } - dec := json.NewDecoder(bytes.NewReader([]byte(str))) - dec.DisallowUnknownFields() - - if err := dec.Decode(&fields); err != nil { - return nil, diag.Errorf("cannot unmarshal search index attribute `analyzers` because it has an incorrect format") - } - return fields, nil -} - -func resourceSearchIndexRefreshFunc(ctx context.Context, clusterName, projectID, indexID string, connV2 *admin.APIClient) retry.StateRefreshFunc { - return func() (any, string, error) { - searchIndex, _, err := connV2.AtlasSearchApi.GetAtlasSearchIndex(ctx, projectID, clusterName, indexID).Execute() - if err != nil { - return nil, "ERROR", err - 
} - status := conversion.SafeString(searchIndex.Status) - return searchIndex, status, nil - } -} diff --git a/internal/service/searchindex/resource_search_index_migration_test.go b/internal/service/searchindex/resource_search_index_migration_test.go index 0cc1138662..a131d500ff 100644 --- a/internal/service/searchindex/resource_search_index_migration_test.go +++ b/internal/service/searchindex/resource_search_index_migration_test.go @@ -7,6 +7,7 @@ import ( ) func TestMigSearchIndex_basic(t *testing.T) { + mig.SkipIfVersionBelow(t, "1.17.4") mig.CreateAndRunTest(t, basicTestCase(t)) } diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go index 4600a2cb0a..6bb1a76db2 100644 --- a/internal/service/searchindex/resource_search_index_test.go +++ b/internal/service/searchindex/resource_search_index_test.go @@ -28,8 +28,8 @@ func TestAccSearchIndex_withSearchType(t *testing.T) { CheckDestroy: acc.CheckDestroySearchIndex, Steps: []resource.TestStep{ { - Config: configBasic(projectID, clusterName, indexName, "search", databaseName), - Check: checkBasic(projectID, clusterName, indexName, "search", databaseName), + Config: configBasic(projectID, clusterName, indexName, "search", databaseName, ""), + Check: checkBasic(projectID, clusterName, indexName, "search", databaseName, ""), }, }, }) @@ -163,11 +163,11 @@ func basicTestCase(tb testing.TB) *resource.TestCase { CheckDestroy: acc.CheckDestroySearchIndex, Steps: []resource.TestStep{ { - Config: configBasic(projectID, clusterName, indexName, "", databaseName), - Check: checkBasic(projectID, clusterName, indexName, "", databaseName), + Config: configBasic(projectID, clusterName, indexName, "", databaseName, ""), + Check: checkBasic(projectID, clusterName, indexName, "", databaseName, ""), }, { - Config: configBasic(projectID, clusterName, indexName, "", databaseName), + Config: configBasic(projectID, clusterName, indexName, "", databaseName, ""), 
ResourceName: resourceName, ImportStateIdFunc: importStateIDFunc(resourceName), ImportState: true, @@ -177,6 +177,74 @@ func basicTestCase(tb testing.TB) *resource.TestCase { } } +func TestAccSearchIndex_withStoredSourceFalse(t *testing.T) { + resource.ParallelTest(t, *storedSourceTestCase(t, "false")) +} + +func TestAccSearchIndex_withStoredSourceTrue(t *testing.T) { + resource.ParallelTest(t, *storedSourceTestCase(t, "true")) +} + +func TestAccSearchIndex_withStoredSourceInclude(t *testing.T) { + resource.ParallelTest(t, *storedSourceTestCase(t, storedSourceIncludeJSON)) +} + +func TestAccSearchIndex_withStoredSourceExclude(t *testing.T) { + resource.ParallelTest(t, *storedSourceTestCase(t, storedSourceExcludeJSON)) +} + +func TestAccSearchIndex_withStoredSourceUpdateEmptyType(t *testing.T) { + resource.ParallelTest(t, *storedSourceTestCaseUpdate(t, "")) +} + +func TestAccSearchIndex_withStoredSourceUpdateSearchType(t *testing.T) { + resource.ParallelTest(t, *storedSourceTestCaseUpdate(t, "search")) +} + +func storedSourceTestCase(tb testing.TB, storedSource string) *resource.TestCase { + tb.Helper() + var ( + projectID, clusterName = acc.ClusterNameExecution(tb) + indexName = acc.RandomName() + databaseName = acc.RandomName() + ) + return &resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(tb) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroySearchIndex, + Steps: []resource.TestStep{ + { + Config: configBasic(projectID, clusterName, indexName, "search", databaseName, storedSource), + Check: checkBasic(projectID, clusterName, indexName, "search", databaseName, storedSource), + }, + }, + } +} + +func storedSourceTestCaseUpdate(tb testing.TB, searchType string) *resource.TestCase { + tb.Helper() + var ( + projectID, clusterName = acc.ClusterNameExecution(tb) + indexName = acc.RandomName() + databaseName = acc.RandomName() + ) + return &resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(tb) }, + 
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroySearchIndex, + Steps: []resource.TestStep{ + { + Config: configBasic(projectID, clusterName, indexName, searchType, databaseName, "false"), + Check: checkBasic(projectID, clusterName, indexName, searchType, databaseName, "false"), + }, + { + Config: configBasic(projectID, clusterName, indexName, searchType, databaseName, "true"), + Check: checkBasic(projectID, clusterName, indexName, searchType, databaseName, "true"), + }, + }, + } +} + func basicVectorTestCase(tb testing.TB) *resource.TestCase { tb.Helper() var ( @@ -238,11 +306,19 @@ func checkExists(resourceName string) resource.TestCheckFunc { } } -func configBasic(projectID, clusterName, indexName, indexType, databaseName string) string { - var indexTypeStr string +func configBasic(projectID, clusterName, indexName, indexType, databaseName, storedSource string) string { + var extra string if indexType != "" { - indexTypeStr = fmt.Sprintf("type=%q", indexType) + extra += fmt.Sprintf("type=%q\n", indexType) } + if storedSource != "" { + if storedSource == "true" || storedSource == "false" { + extra += fmt.Sprintf("stored_source=%q\n", storedSource) + } else { + extra += fmt.Sprintf("stored_source= <<-EOF\n%s\nEOF\n", storedSource) + } + } + return fmt.Sprintf(` resource "mongodbatlas_search_index" "test" { cluster_name = %[1]q @@ -260,12 +336,22 @@ func configBasic(projectID, clusterName, indexName, indexType, databaseName stri project_id = mongodbatlas_search_index.test.project_id index_id = mongodbatlas_search_index.test.index_id } - `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, indexTypeStr) + `, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, extra) } -func checkBasic(projectID, clusterName, indexName, indexType, databaseName string) resource.TestCheckFunc { +func checkBasic(projectID, clusterName, indexName, indexType, databaseName, storedSource 
string) resource.TestCheckFunc { mappingsDynamic := "true" - return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic) + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttr(resourceName, "stored_source", storedSource), + resource.TestCheckResourceAttr(datasourceName, "stored_source", storedSource), + } + if storedSource != "" && storedSource != "true" && storedSource != "false" { + checks = []resource.TestCheckFunc{ + resource.TestCheckResourceAttrWith(resourceName, "stored_source", acc.JSONEquals(storedSource)), + resource.TestCheckResourceAttrWith(datasourceName, "stored_source", acc.JSONEquals(storedSource)), + } + } + return checkAggr(projectID, clusterName, indexName, indexType, databaseName, mappingsDynamic, checks...) } func configWithMapping(projectID, indexName, databaseName, clusterName string) string { @@ -472,7 +558,21 @@ const ( ] } ] -` + ` + + incorrectFormatAnalyzersJSON = ` + [ + { + "wrongField":[ + { + "type":"length", + "min":20, + "max":33 + } + ] + } + ] + ` mappingsFieldsJSON = ` { @@ -516,17 +616,15 @@ const ( }] ` - incorrectFormatAnalyzersJSON = ` - [ - { - "wrongField":[ - { - "type":"length", - "min":20, - "max":33 - } - ] - } - ] + storedSourceIncludeJSON = ` + { + "include": ["include1","include2"] + } + ` + + storedSourceExcludeJSON = ` + { + "exclude": ["exclude1", "exclude2"] + } ` ) diff --git a/website/docs/d/search_index.html.markdown b/website/docs/d/search_index.html.markdown index ebe0b7fc81..2eae237422 100644 --- a/website/docs/d/search_index.html.markdown +++ b/website/docs/d/search_index.html.markdown @@ -45,8 +45,6 @@ data "mongodbatlas_search_index" "test" { * `synonyms.#.name` - Name of the [synonym mapping definition](https://docs.atlas.mongodb.com/reference/atlas-search/synonyms/#std-label-synonyms-ref). * `synonyms.#.source_collection` - Name of the source MongoDB collection for the synonyms. 
* `synonyms.#.analyzer` - Name of the [analyzer](https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/#std-label-analyzers-ref) to use with this synonym mapping. - - - +* `stored_source` - String that can be "true" (store all fields), "false" (default, don't store any field), or a JSON string that contains the list of fields to store (include) or not store (exclude) on Atlas Search. To learn more, see [Stored Source Fields](https://www.mongodb.com/docs/atlas/atlas-search/stored-source-definition/). For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/atlas-search/) - [and MongoDB Atlas API - Search](https://docs.atlas.mongodb.com/reference/api/atlas-search/) Documentation for more information. diff --git a/website/docs/d/search_indexes.html.markdown b/website/docs/d/search_indexes.html.markdown index 6f31eca1f1..84b346244b 100644 --- a/website/docs/d/search_indexes.html.markdown +++ b/website/docs/d/search_indexes.html.markdown @@ -37,6 +37,7 @@ data "mongodbatlas_search_indexes" "test" { ### Results +* `index_id` - The unique identifier of the Atlas Search index. * `name` - Name of the index. * `status` - Current status of the index. * `analyzer` - [Analyzer](https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/#std-label-analyzers-ref) to use when creating the index. @@ -50,8 +51,6 @@ data "mongodbatlas_search_indexes" "test" { * `synonyms.#.name` - Name of the [synonym mapping definition](https://docs.atlas.mongodb.com/reference/atlas-search/synonyms/#std-label-synonyms-ref). * `synonyms.#.source_collection` - Name of the source MongoDB collection for the synonyms. * `synonyms.#.analyzer` - Name of the [analyzer](https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/#std-label-analyzers-ref) to use with this synonym mapping. 
- - - +* `stored_source` - String that can be "true" (store all fields), "false" (default, don't store any field), or a JSON string that contains the list of fields to store (include) or not store (exclude) on Atlas Search. To learn more, see [Stored Source Fields](https://www.mongodb.com/docs/atlas/atlas-search/stored-source-definition/). For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/atlas-search/) - [and MongoDB Atlas API - Search](https://docs.atlas.mongodb.com/reference/api/atlas-search/) Documentation for more information. diff --git a/website/docs/r/search_index.html.markdown b/website/docs/r/search_index.html.markdown index 2630213f75..b695c7ca1c 100644 --- a/website/docs/r/search_index.html.markdown +++ b/website/docs/r/search_index.html.markdown @@ -162,35 +162,36 @@ EOF ```terraform mappings_fields = <<-EOF { - "address": { - "type": "document", - "fields": { - "city": { - "type": "string", - "analyzer": "lucene.simple", - "ignoreAbove": 255 - }, - "state": { - "type": "string", - "analyzer": "lucene.english" + "address": { + "type": "document", + "fields": { + "city": { + "type": "string", + "analyzer": "lucene.simple", + "ignoreAbove": 255 + }, + "state": { + "type": "string", + "analyzer": "lucene.english" + } } - } - }, - "company": { - "type": "string", - "analyzer": "lucene.whitespace", - "multi": { - "mySecondaryAnalyzer": { - "type": "string", - "analyzer": "lucene.french" + }, + "company": { + "type": "string", + "analyzer": "lucene.whitespace", + "multi": { + "mySecondaryAnalyzer": { + "type": "string", + "analyzer": "lucene.french" + } } - } - }, - "employees": { - "type": "string", - "analyzer": "lucene.standard" + }, + "employees": { + "type": "string", + "analyzer": "lucene.standard" } } + EOF ``` * `search_analyzer` - [Analyzer](https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/#std-label-analyzers-ref) to use when searching the index. 
Defaults to [lucene.standard](https://docs.atlas.mongodb.com/reference/atlas-search/analyzers/standard/#std-label-ref-standard-analyzer) @@ -198,10 +199,20 @@ EOF * `fields` - Array of [Fields](https://www.mongodb.com/docs/atlas/atlas-search/field-types/knn-vector/#std-label-fts-data-types-knn-vector) to configure this `vectorSearch` index. It is mandatory for vector searches and it must contain at least one `vector` type field. This field needs to be a JSON string in order to be decoded correctly. +* `stored_source` - String that can be "true" (store all fields), "false" (default, don't store any field), or a JSON string that contains the list of fields to store (include) or not store (exclude) on Atlas Search. To learn more, see [Stored Source Fields](https://www.mongodb.com/docs/atlas/atlas-search/stored-source-definition/). + ```terraform + stored_source = <<-EOF + { + "include": ["field1", "field2"] + } + EOF + ``` + ## Attributes Reference In addition to all arguments above, the following attributes are exported: +* `index_id` - The unique identifier of the Atlas Search index. * `status` - Current status of the index. 
### Analyzers (search index) From 011e6a2848f353cd59a7d7245868e3be81c550c6 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Sun, 14 Jul 2024 14:06:14 +0000 Subject: [PATCH 32/84] chore: Updates CHANGELOG.md for #2388 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56116aa948..c1e23726ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## (Unreleased) +ENHANCEMENTS: + +* data-source/mongodbatlas_search_index: Adds attribute `stored_source` ([#2388](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2388)) +* data-source/mongodbatlas_search_indexes: Adds attribute `stored_source` ([#2388](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2388)) +* resource/mongodbatlas_search_index: Adds attribute `stored_source` ([#2388](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2388)) + BUG FIXES: * resource/mongodbatlas_advanced_cluster: Fixes `disk_iops` attribute for Azure cloud provider ([#2396](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2396)) From d0c711346c9e9329c632a29b440a771eeb993a6f Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Mon, 15 Jul 2024 09:30:22 +0200 Subject: [PATCH 33/84] doc: Improves Guides menu (#2408) * add 0.8.2 metadata * update old category and remove unneeded headers * update page_title * fix titles * remove old guide --- RELEASING.md | 6 + .../guides/0.6.0-upgrade-guide.html.markdown | 8 +- .../guides/0.8.0-upgrade-guide.html.markdown | 8 +- .../guides/0.8.2-upgrade-guide.html.markdown | 9 +- .../guides/0.9.0-upgrade-guide.html.markdown | 8 +- .../guides/0.9.1-upgrade-guide.html.markdown | 7 +- .../guides/1.0.0-upgrade-guide.html.markdown | 7 +- .../guides/1.0.1-upgrade-guide.html.markdown | 7 +- .../guides/1.1.0-upgrade-guide.html.markdown | 7 +- .../guides/1.10.0-upgrade-guide.html.markdown | 7 +- .../guides/1.11.0-upgrade-guide.html.markdown | 6 +- 
.../guides/1.12.0-upgrade-guide.html.markdown | 6 +- .../guides/1.13.0-upgrade-guide.html.markdown | 6 +- .../guides/1.14.0-upgrade-guide.html.markdown | 6 +- .../guides/1.15.0-upgrade-guide.html.markdown | 6 +- .../guides/1.16.0-upgrade-guide.html.markdown | 6 +- .../guides/1.17.0-upgrade-guide.html.markdown | 6 +- .../guides/1.2.0-upgrade-guide.html.markdown | 7 +- .../guides/1.3.0-upgrade-guide.html.markdown | 7 +- .../guides/1.4.0-upgrade-guide.html.markdown | 7 +- .../guides/1.5.0-upgrade-guide.html.markdown | 7 +- .../guides/1.6.0-upgrade-guide.html.markdown | 7 +- .../guides/1.7.0-upgrade-guide.html.markdown | 7 +- .../guides/1.8.0-upgrade-guide.html.markdown | 7 +- .../guides/1.9.0-upgrade-guide.html.markdown | 7 +- ...API-Key-upgrade-guide-1.10.0.html.markdown | 7 +- website/docs/guides/howto-guide.html.markdown | 107 ------------------ 27 files changed, 55 insertions(+), 231 deletions(-) delete mode 100644 website/docs/guides/howto-guide.html.markdown diff --git a/RELEASING.md b/RELEASING.md index 3cbc3d19e4..8d3aef4b8d 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -9,6 +9,12 @@ - If some deprecated attributes need to be removed in the following release, create a Jira ticket and merge the corresponding PR before starting the release workflow. You can search in the code for the constansts in [deprecation.go](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/internal/common/constant/deprecation.go) to find them. +### Move old guides + +**Note**: Only applies if the right most version digit is 0 (considered a major or minor version in [semantic versioning](https://semver.org/)). + +- Keep only [Guides](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/master/website/docs/guides) for 12 months. Previous versions must be added in the header `subcategory: "Older Guides"`. 
+ ### Revise jira release Before triggering a release, view the corresponding [unreleased jira page](https://jira.mongodb.org/projects/CLOUDP?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page&status=unreleased&contains=terraform) to ensure there are no pending tickets. In case there are pending tickets, verify with the team if the expectation is to have them included within the current release. After release workflow is successful the version will be marked as released automatically. diff --git a/website/docs/guides/0.6.0-upgrade-guide.html.markdown b/website/docs/guides/0.6.0-upgrade-guide.html.markdown index 2dd24ae9d9..f8afc7ba5b 100644 --- a/website/docs/guides/0.6.0-upgrade-guide.html.markdown +++ b/website/docs/guides/0.6.0-upgrade-guide.html.markdown @@ -1,10 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 0.6.0: Upgrade Guide" -sidebar_current: "docs-mongodbatlas-guides-060-upgrade-guide" -description: |- - MongoDB Atlas Provider 0.6.0: Upgrade Guide - +page_title: "Upgrade Guide 0.6.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 0.6.0: Upgrade Guide diff --git a/website/docs/guides/0.8.0-upgrade-guide.html.markdown b/website/docs/guides/0.8.0-upgrade-guide.html.markdown index fd782f6830..4ed1e4974f 100644 --- a/website/docs/guides/0.8.0-upgrade-guide.html.markdown +++ b/website/docs/guides/0.8.0-upgrade-guide.html.markdown @@ -1,10 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 0.8.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-080-upgrade-guide" -description: |- - MongoDB Atlas Provider 0.8.0: Upgrade and Information Guide - +page_title: "Upgrade Guide 0.8.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider v0.8.0: Upgrade and Information Guide diff --git a/website/docs/guides/0.8.2-upgrade-guide.html.markdown b/website/docs/guides/0.8.2-upgrade-guide.html.markdown index d0741793ed..3b152e9d4f 100644 --- 
a/website/docs/guides/0.8.2-upgrade-guide.html.markdown +++ b/website/docs/guides/0.8.2-upgrade-guide.html.markdown @@ -1,4 +1,11 @@ -## 0.8.2 Upgrade Guide for Privatelink users +--- +page_title: "Upgrade Guide 0.8.2" +subcategory: "Older Guides" +--- + +# MongoDB Atlas Provider v0.8.2: Upgrade and Information Guide + +## Upgrade Guide for Privatelink users ### Resources are impacted that were created with versions ***v0.8.0/v0.8.1*** ### Fixed in [#398](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/398) diff --git a/website/docs/guides/0.9.0-upgrade-guide.html.markdown b/website/docs/guides/0.9.0-upgrade-guide.html.markdown index 2afa51cd85..9337516895 100644 --- a/website/docs/guides/0.9.0-upgrade-guide.html.markdown +++ b/website/docs/guides/0.9.0-upgrade-guide.html.markdown @@ -1,10 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 0.9.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-090-upgrade-guide" -description: |- - MongoDB Atlas Provider 0.9.0: Upgrade and Information Guide - +page_title: "Upgrade Guide 0.9.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider v0.9.0: Upgrade and Information Guide diff --git a/website/docs/guides/0.9.1-upgrade-guide.html.markdown b/website/docs/guides/0.9.1-upgrade-guide.html.markdown index 7d6dfcb342..093bd52671 100644 --- a/website/docs/guides/0.9.1-upgrade-guide.html.markdown +++ b/website/docs/guides/0.9.1-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 0.9.1: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-091-upgrade-guide" -description: |- - MongoDB Atlas Provider 0.9.1: Upgrade and Information Guide +page_title: "Upgrade Guide 0.9.1" +subcategory: "Older Guides" --- # MongoDB Atlas Provider v0.9.1: Upgrade and Information Guide diff --git a/website/docs/guides/1.0.0-upgrade-guide.html.markdown b/website/docs/guides/1.0.0-upgrade-guide.html.markdown 
index 76daa8ce30..e87b53d822 100644 --- a/website/docs/guides/1.0.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.0.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.0.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-100-upgrade-guide" -description: |- -MongoDB Atlas Provider 0.1.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.0.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.0.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.0.1-upgrade-guide.html.markdown b/website/docs/guides/1.0.1-upgrade-guide.html.markdown index 077af1caaa..9f50f47b53 100644 --- a/website/docs/guides/1.0.1-upgrade-guide.html.markdown +++ b/website/docs/guides/1.0.1-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.0.1: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-101-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.0.1: Upgrade and Information Guide +page_title: "Upgrade Guide 1.0.1" +subcategory: "Older Guides" --- # MongoDB Atlas Provider v1.0.1: Upgrade and Information Guide diff --git a/website/docs/guides/1.1.0-upgrade-guide.html.markdown b/website/docs/guides/1.1.0-upgrade-guide.html.markdown index 0e479426d0..fdc673dc0d 100644 --- a/website/docs/guides/1.1.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.1.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.1.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-110-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.1.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.1.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.1.0/1.1.1: Upgrade and Information Guide diff --git a/website/docs/guides/1.10.0-upgrade-guide.html.markdown 
b/website/docs/guides/1.10.0-upgrade-guide.html.markdown index 74c6ca645b..a35cfaeee3 100644 --- a/website/docs/guides/1.10.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.10.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.10.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1100-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.10.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.10.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.10.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.11.0-upgrade-guide.html.markdown b/website/docs/guides/1.11.0-upgrade-guide.html.markdown index 5437cfb71e..83597a9f8d 100644 --- a/website/docs/guides/1.11.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.11.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.11.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1110-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.11.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.11.0" --- # MongoDB Atlas Provider 1.11.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.12.0-upgrade-guide.html.markdown b/website/docs/guides/1.12.0-upgrade-guide.html.markdown index ebccb06159..98f63a3aea 100644 --- a/website/docs/guides/1.12.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.12.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.12.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1120-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.12.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.12.0" --- # MongoDB Atlas Provider 1.12.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.13.0-upgrade-guide.html.markdown 
b/website/docs/guides/1.13.0-upgrade-guide.html.markdown index db59bab421..fcd697d4f1 100644 --- a/website/docs/guides/1.13.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.13.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.13.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1130-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.13.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.13.0" --- # MongoDB Atlas Provider 1.13.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.14.0-upgrade-guide.html.markdown b/website/docs/guides/1.14.0-upgrade-guide.html.markdown index b584817d21..eb1422dd59 100644 --- a/website/docs/guides/1.14.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.14.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.14.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1140-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.14.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.14.0" --- # MongoDB Atlas Provider 1.14.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.15.0-upgrade-guide.html.markdown b/website/docs/guides/1.15.0-upgrade-guide.html.markdown index 12a9cf8a59..95dd886a1b 100644 --- a/website/docs/guides/1.15.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.15.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.15.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1150-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.15.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.15.0" --- # MongoDB Atlas Provider 1.15.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.16.0-upgrade-guide.html.markdown 
b/website/docs/guides/1.16.0-upgrade-guide.html.markdown index 9702d35a43..e93e7ddbb8 100644 --- a/website/docs/guides/1.16.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.16.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.16.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1160-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.16.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.16.0" --- # MongoDB Atlas Provider 1.16.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.17.0-upgrade-guide.html.markdown b/website/docs/guides/1.17.0-upgrade-guide.html.markdown index 40536dc7f5..56e931c4b6 100644 --- a/website/docs/guides/1.17.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.17.0-upgrade-guide.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.17.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-1170-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.17.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.17.0" --- # MongoDB Atlas Provider 1.17.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.2.0-upgrade-guide.html.markdown b/website/docs/guides/1.2.0-upgrade-guide.html.markdown index 7c349a0f7f..f46f18af2d 100644 --- a/website/docs/guides/1.2.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.2.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.2.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-120-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.2.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.2.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.2.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.3.0-upgrade-guide.html.markdown 
b/website/docs/guides/1.3.0-upgrade-guide.html.markdown index a15108703c..34e4d6b692 100644 --- a/website/docs/guides/1.3.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.3.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.3.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-130-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.3.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.3.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.3.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.4.0-upgrade-guide.html.markdown b/website/docs/guides/1.4.0-upgrade-guide.html.markdown index f39efd2a41..ac38a87f62 100644 --- a/website/docs/guides/1.4.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.4.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.4.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-140-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.4.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.4.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.4.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.5.0-upgrade-guide.html.markdown b/website/docs/guides/1.5.0-upgrade-guide.html.markdown index f9d14e12ac..2e305bd48f 100644 --- a/website/docs/guides/1.5.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.5.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.5.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-150-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.5.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.5.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.5.0: Upgrade and Information Guide diff --git 
a/website/docs/guides/1.6.0-upgrade-guide.html.markdown b/website/docs/guides/1.6.0-upgrade-guide.html.markdown index bead7c2ba5..57dd04b2c2 100644 --- a/website/docs/guides/1.6.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.6.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.6.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-160-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.6.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.6.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.6.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.7.0-upgrade-guide.html.markdown b/website/docs/guides/1.7.0-upgrade-guide.html.markdown index 1526910f07..f29a56002c 100644 --- a/website/docs/guides/1.7.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.7.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.7.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-170-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.7.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.7.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.7.0: Upgrade and Information Guide diff --git a/website/docs/guides/1.8.0-upgrade-guide.html.markdown b/website/docs/guides/1.8.0-upgrade-guide.html.markdown index 1313cda516..a10c0ac787 100644 --- a/website/docs/guides/1.8.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.8.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.8.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-180-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.8.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.8.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.8.0: Upgrade and Information Guide 
diff --git a/website/docs/guides/1.9.0-upgrade-guide.html.markdown b/website/docs/guides/1.9.0-upgrade-guide.html.markdown index 508f708560..cd5133a922 100644 --- a/website/docs/guides/1.9.0-upgrade-guide.html.markdown +++ b/website/docs/guides/1.9.0-upgrade-guide.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider 1.9.0: Upgrade and Information Guide" -sidebar_current: "docs-mongodbatlas-guides-190-upgrade-guide" -description: |- -MongoDB Atlas Provider 1.9.0: Upgrade and Information Guide +page_title: "Upgrade Guide 1.9.0" +subcategory: "Older Guides" --- # MongoDB Atlas Provider 1.9.0: Upgrade and Information Guide diff --git a/website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown b/website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown index 38a206791f..eec249e566 100644 --- a/website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown +++ b/website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown @@ -1,9 +1,6 @@ --- -layout: "mongodbatlas" -page_title: "Upgrade Guide for Terraform MongoDB Atlas Provider Programmatic API Key Resource in v1.10.0" -sidebar_current: "docs-mongodbatlas-guides-Programmatic-API-Key-upgrade-guide" -description: |- -MongoDB Atlas Provider : Upgrade and Information Guide +page_title: "Upgrade Guide 1.10.0 for Programmatic API Key" +subcategory: "Older Guides" --- # MongoDB Atlas Provider: Programmatic API Key Upgrade Guide in v1.10.0 diff --git a/website/docs/guides/howto-guide.html.markdown b/website/docs/guides/howto-guide.html.markdown deleted file mode 100644 index 6e835d1722..0000000000 --- a/website/docs/guides/howto-guide.html.markdown +++ /dev/null @@ -1,107 +0,0 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas Provider How-To Guide" -sidebar_current: "docs-mongodbatlas-guides-how-to-guide" -description: |- -MongoDB Atlas Provider : How-To Guide ---- - -# MongoDB Atlas Provider: How-To Guide - -The 
Terraform MongoDB Atlas Provider guide to perform common tasks with the provider. - -##How to Get A Pre-existing Container ID - -The following is an end to end example of how to get an existing container id. - -1) Start with an empty project - -2) Empty state file - -3) Apply a curl command to build cluster - -4) Run `terraform apply` to retrieve the container id - -The following illustrates step 3 and 4 above, assuming 1 & 2 were true: - -1) Create a cluster using a curl command to simulate non-Terraform created cluster. This will also create a container. - -``` -curl --user "pub:priv" --digest \ ---header "Content-Type: application/json" \ ---include \ ---request POST "https://cloud.mongodb.com/api/atlas/v1.0/groups/grpid/clusters?pretty=true" \ ---data ' -{ - "name": "SingleRegionCluster", - "numShards": 1, - "providerSettings": { - "providerName": "AWS", - "instanceSizeName": "M40", - "regionName": "US_EAST_1" - }, - "clusterType": "REPLICASET", - "replicationFactor": 3, - "replicationSpecs": [ - { - "numShards": 1, - "regionsConfig": { - "US_EAST_1": { - "analyticsNodes": 0, - "electableNodes": 3, - "priority": 7, - "readOnlyNodes": 0 - } - }, - "zoneName": "Zone 1" - } - ], - "backupEnabled": false, - "autoScaling": { - "diskGBEnabled": true - } -}' -``` - - - -2) Then apply this Terraform config to then read the information from the appropriate Data Sources and output the container id. - - -``` -data "mongodbatlas_cluster" "admin" { - name = "SingleRegionCluster" - project_id = local.mongodbatlas_project_id -} - -data "mongodbatlas_network_container" "admin" { - project_id = local.mongodbatlas_project_id - container_id = data.mongodbatlas_cluster.admin.container_id -} - -output "container" { - value = data.mongodbatlas_network_container.admin.container_id -} - -Apply complete! Resources: 0 added, 0 changed, 0 destroyed. 
- -Outputs: - -container = "62ffe4ecb79e2e007c375935" -``` - - -This example was tested using versions: -- darwin_amd64 -- provider registry.terraform.io/hashicorp/aws v4.26.0 -- provider registry.terraform.io/mongodb/mongodbatlas v1.4.3 - - -### Helpful Links - -* [Report bugs](https://github.com/mongodb/terraform-provider-mongodbatlas/issues) - -* [Request Features](https://feedback.mongodb.com/forums/924145-atlas?category_id=370723) - -* [Contact Support](https://docs.atlas.mongodb.com/support/) covered by MongoDB Atlas support plans, Developer and above. - \ No newline at end of file From e2b3a961792d1b8daf325481423c929c63edf66c Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Mon, 15 Jul 2024 09:33:29 +0100 Subject: [PATCH 34/84] test: Refactors resource tests to use GetClusterInfo `ldap_configuration` (#2411) * test: Refactors resource tests to use GetClusterInfo ldap_configuration * test: Fix depends_on clause * test: remove unused clusterName and align fields --- .../resource_ldap_configuration_test.go | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go index 7db034a5dd..f9eeba0eac 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go @@ -30,8 +30,14 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { password = os.Getenv("MONGODB_ATLAS_LDAP_PASSWORD") port = os.Getenv("MONGODB_ATLAS_LDAP_PORT") caCertificate = os.Getenv("MONGODB_ATLAS_LDAP_CA_CERTIFICATE") - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{ + CloudBackup: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_EAST_2"}, + }, + }) + projectID = clusterInfo.ProjectID + 
clusterTerraformStr = clusterInfo.ClusterTerraformStr ) resource.Test(t, resource.TestCase{ @@ -39,7 +45,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configWithVerify(projectID, clusterName, hostname, username, password, caCertificate, cast.ToInt(port), true), + Config: configWithVerify(clusterTerraformStr, clusterInfo.ClusterResourceName, projectID, hostname, username, password, caCertificate, cast.ToInt(port), true), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -155,40 +161,33 @@ func configBasic(projectID, hostname, username, password string, authEnabled boo `, projectID, hostname, username, password, authEnabled, port) } -func configWithVerify(projectID, clusterName, hostname, username, password, caCertificate string, port int, authEnabled bool) string { +func configWithVerify(clusterTerraformStr, clusterResourceName, projectID, hostname, username, password, caCertificate string, port int, authEnabled bool) string { return fmt.Sprintf(` - resource "mongodbatlas_cluster" "test" { - project_id = %[1]q - name = %[2]q - provider_name = "AWS" - provider_region_name = "US_EAST_2" - provider_instance_size_name = "M10" - cloud_backup = true //enable cloud provider snapshots - } +%[8]s resource "mongodbatlas_ldap_verify" "test" { - project_id = %[1]q - hostname = %[3]q - bind_username = %[4]q - bind_password = %[5]q - port = %[6]d + project_id = %[1]q + hostname = %[2]q + bind_username = %[3]q + bind_password = %[4]q + port = %[5]d ca_certificate = <<-EOF -%[8]s +%[7]s EOF authz_query_template = "{USER}?memberOf?base" - depends_on = [mongodbatlas_cluster.test] + depends_on = [%[9]s] } resource "mongodbatlas_ldap_configuration" "test" { - project_id = %[1]q - authorization_enabled = false - hostname = %[3]q - bind_username = %[4]q - 
bind_password = %[5]q - port = %[6]d - authentication_enabled = %[7]t + project_id = %[1]q + authorization_enabled = false + hostname = %[2]q + bind_username = %[3]q + bind_password = %[4]q + port = %[5]d + authentication_enabled = %[6]t ca_certificate = <<-EOF -%[8]s +%[7]s EOF authz_query_template = "{USER}?memberOf?base" user_to_dn_mapping{ @@ -196,5 +195,5 @@ func configWithVerify(projectID, clusterName, hostname, username, password, caCe ldap_query = "DC=example,DC=com??sub?(userPrincipalName={0})" } depends_on = [mongodbatlas_ldap_verify.test] - }`, projectID, clusterName, hostname, username, password, port, authEnabled, caCertificate) + }`, projectID, hostname, username, password, port, authEnabled, caCertificate, clusterTerraformStr, clusterResourceName) } From 4c9c392ab6f15848efa612f9b5e36d0afb641397 Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Mon, 15 Jul 2024 09:34:42 +0100 Subject: [PATCH 35/84] test: Refactors resource tests to use GetClusterInfo `cloud_backup_snapshot_restore_job` (#2413) * test: Refactors resource tests to use GetClusterInfo `cloud_backup_snapshot_restore_job` * test: fix reference to clusterResourceName --- ..._cloud_backup_snapshot_restore_job_test.go | 70 ++++++++----------- 1 file changed, 30 insertions(+), 40 deletions(-) diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index 188b026b33..25b1dea41f 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -18,17 +18,26 @@ const ( dataSourceName = "data.mongodbatlas_cloud_backup_snapshot_restore_job.test" ) +var clusterRequest = acc.ClusterRequest{ + CloudBackup: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_2"}, + }, +} + func 
TestAccCloudBackupSnapshotRestoreJob_basic(t *testing.T) { resource.ParallelTest(t, *basicTestCase(t)) } func TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { var ( - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() - description = fmt.Sprintf("My description in %s", clusterName) - retentionInDays = "1" - useSnapshotID = true + clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterName = clusterInfo.ClusterName + description = fmt.Sprintf("My description in %s", clusterName) + retentionInDays = "1" + useSnapshotID = true + clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterResourceName = clusterInfo.ClusterResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -37,14 +46,14 @@ func TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configDownload(projectID, clusterName, description, retentionInDays, useSnapshotID), + Config: configDownload(clusterTerraformStr, clusterResourceName, description, retentionInDays, useSnapshotID), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.download", "true"), ), }, { - Config: configDownload(projectID, clusterName, description, retentionInDays, !useSnapshotID), + Config: configDownload(clusterTerraformStr, clusterResourceName, description, retentionInDays, !useSnapshotID), ExpectError: regexp.MustCompile("SNAPSHOT_NOT_FOUND"), }, }, @@ -57,8 +66,8 @@ func basicTestCase(tb testing.TB) *resource.TestCase { var ( snapshotsDataSourceName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.test" snapshotsDataSourcePaginationName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.pagination" - projectID = acc.ProjectIDExecution(tb) - clusterName = acc.RandomClusterName() + clusterInfo = acc.GetClusterInfo(tb, &clusterRequest) + clusterName = clusterInfo.ClusterName description = 
fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" ) @@ -69,7 +78,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configBasic(projectID, clusterName, description, retentionInDays), + Config: configBasic(clusterInfo.ClusterTerraformStr, clusterInfo.ClusterResourceName, description, retentionInDays), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.automated", "true"), @@ -139,25 +148,15 @@ func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { } } -func configBasic(projectID, clusterName, description, retentionInDays string) string { +func configBasic(terraformStr, clusterResourceName, description, retentionInDays string) string { return fmt.Sprintf(` - resource "mongodbatlas_cluster" "my_cluster" { - project_id = %[1]q - name = %[2]q - - // Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true - } - + %[1]s resource "mongodbatlas_cloud_backup_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = %[2]s.project_id + cluster_name = %[2]s.name description = %[3]q retention_in_days = %[4]q - depends_on = [mongodbatlas_cluster.my_cluster] + depends_on = [%[2]s] } resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { @@ -191,29 +190,20 @@ func configBasic(projectID, clusterName, description, retentionInDays string) st page_num = 1 items_per_page = 5 } - `, projectID, clusterName, description, retentionInDays) + `, terraformStr, clusterResourceName, description, retentionInDays) } -func configDownload(projectID, clusterName, description, retentionInDays string, useSnapshotID bool) string { +func configDownload(terraformStr, clusterResourceName, description, 
retentionInDays string, useSnapshotID bool) string { var snapshotIDField string if useSnapshotID { snapshotIDField = `snapshot_id = mongodbatlas_cloud_backup_snapshot.test.id` } return fmt.Sprintf(` - resource "mongodbatlas_cluster" "my_cluster" { - project_id = %[1]q - name = %[2]q - - provider_name = "AWS" - provider_region_name = "US_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud provider snapshots - } - + %[1]s resource "mongodbatlas_cloud_backup_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = %[2]s.project_id + cluster_name = %[2]s.name description = %[3]q retention_in_days = %[4]q } @@ -227,5 +217,5 @@ func configDownload(projectID, clusterName, description, retentionInDays string, download = true } } - `, projectID, clusterName, description, retentionInDays, snapshotIDField) + `, terraformStr, clusterResourceName, description, retentionInDays, snapshotIDField) } From c1cb5fd64becba089bfc8540153bb78c9f10f696 Mon Sep 17 00:00:00 2001 From: Agustin Bettati Date: Mon, 15 Jul 2024 11:13:17 +0200 Subject: [PATCH 36/84] doc: Clarify usage of maintenance window resource (#2418) --- website/docs/r/maintenance_window.html.markdown | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/docs/r/maintenance_window.html.markdown b/website/docs/r/maintenance_window.html.markdown index af6c7b3519..75d5627ea3 100644 --- a/website/docs/r/maintenance_window.html.markdown +++ b/website/docs/r/maintenance_window.html.markdown @@ -8,7 +8,9 @@ description: |- # Resource: mongodbatlas_maintenance_window -`mongodbatlas_maintenance_window` provides a resource to schedule a maintenance window for your MongoDB Atlas Project and/or set to defer a scheduled maintenance up to two times. 
+`mongodbatlas_maintenance_window` provides a resource to schedule the maintenance window for your MongoDB Atlas Project and/or set to defer a scheduled maintenance up to two times. Please refer to [Maintenance Windows](https://www.mongodb.com/docs/atlas/tutorial/cluster-maintenance-window/#configure-maintenance-window) documentation for more details. + +-> **NOTE:** Only a single maintenance window resource can be defined per project. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. From 3110a528d62fee08ae223e5fa2a3f1d737617e2e Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Mon, 15 Jul 2024 10:36:58 +0100 Subject: [PATCH 37/84] test: Refactors resource tests to use GetClusterInfo `cloud_backup_schedule` (#2414) * test: Cluster support PitEnabled * test: Refactors resource tests to use GetClusterInfo `mongodbatlas_cloud_backup_schedule` * apply PR suggestions * test: fix broken test after merging --- .../resource_cloud_backup_schedule_test.go | 64 ++++++++----------- internal/testutil/acc/cluster.go | 1 + internal/testutil/acc/config_formatter.go | 1 + .../testutil/acc/config_formatter_test.go | 9 ++- 4 files changed, 36 insertions(+), 39 deletions(-) diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index c9dff0becc..66f13b6235 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -252,9 +252,18 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { var ( - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() - checkMap = map[string]string{ + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{ + CloudBackup: true, + ReplicationSpecs: 
[]acc.ReplicationSpecRequest{ + {Region: "US_EAST_2"}, + }, + PitEnabled: true, // you cannot copy oplogs when pit is not enabled + }) + clusterName = clusterInfo.ClusterName + terraformStr = clusterInfo.ClusterTerraformStr + clusterResourceName = clusterInfo.ClusterResourceName + projectID = clusterInfo.ProjectID + checkMap = map[string]string{ "cluster_name": clusterName, "reference_hour_of_day": "3", "reference_minute_of_hour": "45", @@ -300,7 +309,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(projectID, clusterName, false, &admin.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, &admin.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -308,7 +317,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc(checksCreate...), }, { - Config: configCopySettings(projectID, clusterName, true, &admin.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, &admin.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -525,10 +534,10 @@ func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) s `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(projectID, clusterName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { +func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { var copySettings string if !emptyCopySettings { - copySettings 
= ` + copySettings = fmt.Sprintf(` copy_settings { cloud_provider = "AWS" frequencies = ["HOURLY", @@ -538,40 +547,19 @@ func configCopySettings(projectID, clusterName string, emptyCopySettings bool, p "YEARLY", "ON_DEMAND"] region_name = "US_EAST_1" - replication_spec_id = mongodbatlas_cluster.my_cluster.replication_specs.*.id[0] + replication_spec_id = %[1]s.replication_specs.*.id[0] should_copy_oplogs = true - }` + }`, clusterResourceName) } return fmt.Sprintf(` - resource "mongodbatlas_cluster" "my_cluster" { - project_id = %[1]q - name = %[2]q - - cluster_type = "REPLICASET" - replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - // Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_EAST_2" - provider_instance_size_name = "M10" - cloud_backup = true //enable cloud provider snapshots - pit_enabled = true // enable point in time restore. you cannot copy oplogs when pit is not enabled. 
- } - + %[1]s resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { - project_id = %[1]q - cluster_name = %[2]q + project_id = %[2]q + cluster_name = %[3]s.name - reference_hour_of_day = %[3]d - reference_minute_of_hour = %[4]d - restore_window_days = %[5]d + reference_hour_of_day = %[4]d + reference_minute_of_hour = %[5]d + restore_window_days = %[6]d policy_item_hourly { frequency_interval = 1 @@ -598,9 +586,9 @@ func configCopySettings(projectID, clusterName string, emptyCopySettings bool, p retention_unit = "years" retention_value = 1 } - %s + %[7]s } - `, projectID, clusterName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings) + `, terraformStr, projectID, clusterResourceName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings) } func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index d895a5b160..369c62e72c 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -17,6 +17,7 @@ type ClusterRequest struct { DiskSizeGb int CloudBackup bool Geosharded bool + PitEnabled bool } type ClusterInfo struct { diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index aab67c7835..ca6aa99308 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -104,6 +104,7 @@ func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clust "cluster_type": clusterTypeStr, "name": clusterName, "backup_enabled": req.CloudBackup, + "pit_enabled": req.PitEnabled, } if req.DiskSizeGb != 0 { clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 4d80dceb8e..f8d8887395 100644 --- 
a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -112,6 +112,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = false cluster_type = "REPLICASET" name = "my-name" + pit_enabled = false project_id = "project" replication_specs { @@ -139,6 +140,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = true cluster_type = "GEOSHARDED" name = "my-name" + pit_enabled = true project_id = "project" replication_specs { @@ -167,6 +169,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = false cluster_type = "REPLICASET" name = "my-name" + pit_enabled = false project_id = "project" replication_specs { @@ -195,6 +198,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = false cluster_type = "REPLICASET" name = "my-name" + pit_enabled = false project_id = "project" replication_specs { @@ -223,6 +227,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = false cluster_type = "REPLICASET" name = "my-name" + pit_enabled = false project_id = "project" replication_specs { @@ -267,6 +272,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = false cluster_type = "REPLICASET" name = "my-name" + pit_enabled = false project_id = "project" replication_specs { @@ -308,6 +314,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { backup_enabled = false cluster_type = "REPLICASET" name = "my-name" + pit_enabled = false project_id = "project" replication_specs { @@ -367,7 +374,7 @@ func Test_ClusterResourceHcl(t *testing.T) { }, "overrideClusterResource": { overrideClusterResource, - acc.ClusterRequest{ClusterNameExplicit: clusterName, Geosharded: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ + acc.ClusterRequest{ClusterNameExplicit: clusterName, Geosharded: true, PitEnabled: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ 
{Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE}, }}, }, From 6674f82919e1f55b1515727c9f6428b564e42a6f Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Mon, 15 Jul 2024 11:01:20 +0100 Subject: [PATCH 38/84] test: Refactors resource tests to use GetClusterInfo `federated_database_instance` (#2412) * test: Support getting cluster info with project * test: Refactors resource tests to use GetClusterInfo `federated_database_instance` * test: refactor, use a single GetClusterInfo and support AddDefaults * test: use renamed argument in test --- ...source_federated_database_instance_test.go | 71 ++++++++----------- internal/testutil/acc/cluster.go | 53 +++++++++----- internal/testutil/acc/config_formatter.go | 31 ++++---- .../testutil/acc/config_formatter_test.go | 19 ++--- 4 files changed, 92 insertions(+), 82 deletions(-) diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go index 7c95aa741b..8dc6667b01 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go @@ -113,12 +113,23 @@ func TestAccFederatedDatabaseInstance_s3bucket(t *testing.T) { func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { var ( - resourceName = "mongodbatlas_federated_database_instance.test" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() - clusterName1 = acc.RandomClusterName() - clusterName2 = acc.RandomClusterName() - name = acc.RandomName() + specs = []acc.ReplicationSpecRequest{ + {Region: "EU_WEST_2"}, + } + clusterRequest = acc.ClusterRequest{ + ReplicationSpecs: specs, + } + resourceName = "mongodbatlas_federated_database_instance.test" + name = acc.RandomName() + clusterInfo = acc.GetClusterInfo(t, 
&clusterRequest) + projectID = clusterInfo.ProjectID + clusterRequest2 = acc.ClusterRequest{ + ProjectID: projectID, + ReplicationSpecs: specs, + ResourceSuffix: "cluster2", + } + cluster2Info = acc.GetClusterInfo(t, &clusterRequest2) + dependencyTerraform = fmt.Sprintf("%s\n%s", clusterInfo.ClusterTerraformStr, cluster2Info.ClusterTerraformStr) ) resource.ParallelTest(t, resource.TestCase{ @@ -127,7 +138,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { Steps: []resource.TestStep{ { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - Config: configWithCluster(orgID, projectName, clusterName1, clusterName2, name), + Config: configWithCluster(dependencyTerraform, projectID, clusterInfo.ClusterResourceName, cluster2Info.ClusterResourceName, name), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), @@ -140,34 +151,12 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { }) } -func configWithCluster(orgID, projectName, clusterName1, clusterName2, name string) string { +func configWithCluster(terraformStr, projectID, cluster1ResourceName, cluster2ResourceName, name string) string { return fmt.Sprintf(` - resource "mongodbatlas_project" "project-tf" { - org_id = %[1]q - name = %[2]q - } - - resource "mongodbatlas_cluster" "cluster-1" { - project_id = mongodbatlas_project.project-tf.id - provider_name = "AWS" - name = %[3]q - backing_provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - } - - - resource "mongodbatlas_cluster" "cluster-2" { - project_id = mongodbatlas_project.project-tf.id - provider_name = "AWS" - name = %[4]q - backing_provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - } + %[1]s resource "mongodbatlas_federated_database_instance" "test" { - project_id = mongodbatlas_project.project-tf.id + project_id 
= %[2]q name = %[5]q storage_databases { name = "VirtualDatabase0" @@ -176,21 +165,21 @@ func configWithCluster(orgID, projectName, clusterName1, clusterName2, name stri data_sources { collection = "listingsAndReviews" database = "sample_airbnb" - store_name = mongodbatlas_cluster.cluster-1.name + store_name = %[3]s.name } data_sources { collection = "listingsAndReviews" database = "sample_airbnb" - store_name = mongodbatlas_cluster.cluster-2.name + store_name = %[4]s.name } } } storage_stores { - name = mongodbatlas_cluster.cluster-1.name - cluster_name = mongodbatlas_cluster.cluster-1.name - project_id = mongodbatlas_project.project-tf.id + name = %[3]s.name + cluster_name = %[3]s.name + project_id = %[2]q provider = "atlas" read_preference { mode = "secondary" @@ -218,9 +207,9 @@ func configWithCluster(orgID, projectName, clusterName1, clusterName2, name stri } storage_stores { - name = mongodbatlas_cluster.cluster-2.name - cluster_name = mongodbatlas_cluster.cluster-2.name - project_id = mongodbatlas_project.project-tf.id + name = %[4]s.name + cluster_name = %[4]s.name + project_id = %[2]q provider = "atlas" read_preference { mode = "secondary" @@ -247,7 +236,7 @@ func configWithCluster(orgID, projectName, clusterName1, clusterName2, name stri } } } - `, orgID, projectName, clusterName1, clusterName2, name) + `, terraformStr, projectID, cluster1ResourceName, cluster2ResourceName, name) } func importStateIDFuncS3Bucket(resourceName, s3Bucket string) resource.ImportStateIdFunc { diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index 369c62e72c..0786d03963 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -11,8 +11,10 @@ import ( type ClusterRequest struct { Tags map[string]string + ProjectID string + ResourceSuffix string ResourceDependencyName string - ClusterNameExplicit string + ClusterName string ReplicationSpecs []ReplicationSpecRequest DiskSizeGb int CloudBackup bool @@ -20,6 +22,18 @@ 
type ClusterRequest struct { PitEnabled bool } +func (r *ClusterRequest) AddDefaults() { + if r.ResourceSuffix == "" { + r.ResourceSuffix = defaultClusterResourceSuffix + } + if len(r.ReplicationSpecs) == 0 { + r.ReplicationSpecs = []ReplicationSpecRequest{{}} + } + if r.ClusterName == "" { + r.ClusterName = RandomClusterName() + } +} + type ClusterInfo struct { ProjectIDStr string ProjectID string @@ -29,6 +43,8 @@ type ClusterInfo struct { ClusterTerraformStr string } +const defaultClusterResourceSuffix = "cluster_info" + // GetClusterInfo is used to obtain a project and cluster configuration resource. // When `MONGODB_ATLAS_CLUSTER_NAME` and `MONGODB_ATLAS_PROJECT_ID` are defined, creation of resources is avoided. This is useful for local execution but not intended for CI executions. // Clusters will be created in project ProjectIDExecution. @@ -37,26 +53,26 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { if req == nil { req = new(ClusterRequest) } - clusterName := os.Getenv("MONGODB_ATLAS_CLUSTER_NAME") - projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") - if clusterName != "" && projectID != "" { - return ClusterInfo{ - ProjectIDStr: fmt.Sprintf("%q", projectID), - ProjectID: projectID, - ClusterName: clusterName, - ClusterNameStr: fmt.Sprintf("%q", clusterName), - ClusterTerraformStr: "", + if req.ProjectID == "" { + if ExistingClusterUsed() { + projectID, clusterName := existingProjectIDClusterName() + return ClusterInfo{ + ProjectIDStr: fmt.Sprintf("%q", projectID), + ProjectID: projectID, + ClusterName: clusterName, + ClusterNameStr: fmt.Sprintf("%q", clusterName), + ClusterTerraformStr: "", + } } + req.ProjectID = ProjectIDExecution(tb) } - projectID = ProjectIDExecution(tb) - clusterTerraformStr, clusterName, err := ClusterResourceHcl(projectID, req) + clusterTerraformStr, clusterName, clusterResourceName, err := ClusterResourceHcl(req) if err != nil { tb.Error(err) } - clusterResourceName := 
"mongodbatlas_advanced_cluster.cluster_info" return ClusterInfo{ - ProjectIDStr: fmt.Sprintf("%q", projectID), - ProjectID: projectID, + ProjectIDStr: fmt.Sprintf("%q", req.ProjectID), + ProjectID: req.ProjectID, ClusterName: clusterName, ClusterNameStr: fmt.Sprintf("%s.name", clusterResourceName), ClusterResourceName: clusterResourceName, @@ -65,11 +81,14 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { } func ExistingClusterUsed() bool { - clusterName := os.Getenv("MONGODB_ATLAS_CLUSTER_NAME") - projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") + projectID, clusterName := existingProjectIDClusterName() return clusterName != "" && projectID != "" } +func existingProjectIDClusterName() (projectID, clusterName string) { + return os.Getenv("MONGODB_ATLAS_PROJECT_ID"), os.Getenv("MONGODB_ATLAS_CLUSTER_NAME") +} + type ReplicationSpecRequest struct { ZoneName string Region string diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index ca6aa99308..d2052ddfdb 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -2,6 +2,7 @@ package acc import ( "encoding/json" + "errors" "fmt" "regexp" "sort" @@ -75,22 +76,18 @@ func ToSnakeCase(str string) string { return strings.ToLower(snake) } -func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clusterName string, err error) { - if req == nil { - req = new(ClusterRequest) +func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { + if req == nil || req.ProjectID == "" { + return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID set") } + req.AddDefaults() specRequests := req.ReplicationSpecs - if len(specRequests) == 0 { - specRequests = append(specRequests, ReplicationSpecRequest{}) - } specs := make([]admin.ReplicationSpec, len(specRequests)) for i, specRequest := range specRequests { specs[i] = 
ReplicationSpec(&specRequest) } - clusterName = req.ClusterNameExplicit - if clusterName == "" { - clusterName = RandomClusterName() - } + clusterName = req.ClusterName + resourceSuffix := req.ResourceSuffix clusterTypeStr := "REPLICASET" if req.Geosharded { clusterTypeStr = "GEOSHARDED" @@ -98,9 +95,10 @@ func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clust f := hclwrite.NewEmptyFile() root := f.Body() - cluster := root.AppendNewBlock("resource", []string{"mongodbatlas_advanced_cluster", "cluster_info"}).Body() + resourceType := "mongodbatlas_advanced_cluster" + cluster := root.AppendNewBlock("resource", []string{resourceType, resourceSuffix}).Body() clusterRootAttributes := map[string]any{ - "project_id": projectID, + "project_id": req.ProjectID, "cluster_type": clusterTypeStr, "name": clusterName, "backup_enabled": req.CloudBackup, @@ -114,7 +112,7 @@ func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clust for i, spec := range specs { err = writeReplicationSpec(cluster, spec) if err != nil { - return "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) + return "", "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) } } if len(req.Tags) > 0 { @@ -128,14 +126,15 @@ func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clust cluster.AppendNewline() if req.ResourceDependencyName != "" { if !strings.Contains(req.ResourceDependencyName, ".") { - return "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'") + return "", "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'") } err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName)) if err != nil { - return "", "", err + return "", "", "", err } } - return "\n" + string(f.Bytes()), clusterName, err + clusterResourceName := fmt.Sprintf("%s.%s", resourceType, resourceSuffix) + return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err 
} func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error { diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index f8d8887395..2c5f7b9283 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -355,32 +355,32 @@ func Test_ClusterResourceHcl(t *testing.T) { }{ "defaults": { standardClusterResource, - acc.ClusterRequest{ClusterNameExplicit: clusterName}, + acc.ClusterRequest{ClusterName: clusterName}, }, "dependsOn": { dependsOnClusterResource, - acc.ClusterRequest{ClusterNameExplicit: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, + acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, }, "dependsOnMulti": { dependsOnMultiResource, - acc.ClusterRequest{ClusterNameExplicit: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, + acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, }, "twoReplicationSpecs": { twoReplicationSpecs, - acc.ClusterRequest{ClusterNameExplicit: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ {Region: "US_WEST_1", ZoneName: "Zone 1"}, {Region: "EU_WEST_2", ZoneName: "Zone 2"}, }}, }, "overrideClusterResource": { overrideClusterResource, - acc.ClusterRequest{ClusterNameExplicit: clusterName, Geosharded: true, PitEnabled: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ + acc.ClusterRequest{ClusterName: clusterName, Geosharded: true, PitEnabled: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ {Region: "MY_REGION_1", ZoneName: "Zone X", 
InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE}, }}, }, "twoRegionConfigs": { twoRegionConfigs, - acc.ClusterRequest{ClusterNameExplicit: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ { Region: "US_WEST_1", InstanceSize: "M10", @@ -392,7 +392,7 @@ func Test_ClusterResourceHcl(t *testing.T) { }, "autoScalingDiskEnabled": { autoScalingDiskEnabled, - acc.ClusterRequest{ClusterNameExplicit: clusterName, Tags: map[string]string{ + acc.ClusterRequest{ClusterName: clusterName, Tags: map[string]string{ "ArchiveTest": "true", "Owner": "test", }, ReplicationSpecs: []acc.ReplicationSpecRequest{ {AutoScalingDiskGbEnabled: true}, @@ -402,8 +402,11 @@ func Test_ClusterResourceHcl(t *testing.T) { ) for name, tc := range testCases { t.Run(name, func(t *testing.T) { - config, actualClusterName, err := acc.ClusterResourceHcl("project", &tc.req) + req := tc.req + req.ProjectID = "project" + config, actualClusterName, actualResourceName, err := acc.ClusterResourceHcl(&req) require.NoError(t, err) + assert.Equal(t, "mongodbatlas_advanced_cluster.cluster_info", actualResourceName) assert.Equal(t, clusterName, actualClusterName) assert.Equal(t, tc.expected, config) }) From d1c25f0b265a29cd57a81780d5759b0ab4c37d23 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Mon, 15 Jul 2024 23:25:21 +0200 Subject: [PATCH 39/84] doc: Removes docs headers as they are not needed (#2422) * remove unneeded YAML frontmatter headers * small adjustements * change root files * remove from templates * use Deprecated category * apply feedback --- templates/data-source.md.tmpl | 8 -------- .../data-sources/control_plane_ip_addresses.md.tmpl | 8 -------- templates/data-sources/push_based_log_export.md.tmpl | 8 -------- templates/data-sources/search_deployment.md.tmpl | 8 -------- templates/resources.md.tmpl | 7 ------- 
templates/resources/push_based_log_export.md.tmpl | 8 -------- templates/resources/search_deployment.md.tmpl | 8 -------- website/docs/d/access_list_api_key.html.markdown | 8 -------- website/docs/d/access_list_api_keys.html.markdown | 8 -------- website/docs/d/advanced_cluster.html.markdown | 8 -------- website/docs/d/advanced_clusters.html.markdown | 8 -------- website/docs/d/alert_configuration.html.markdown | 8 -------- website/docs/d/alert_configurations.html.markdown | 8 -------- website/docs/d/api_key.html.markdown | 8 -------- website/docs/d/api_keys.html.markdown | 8 -------- website/docs/d/atlas_user.html.markdown | 8 -------- website/docs/d/atlas_users.html.markdown | 8 -------- website/docs/d/auditing.html.markdown | 8 -------- website/docs/d/backup_compliance_policy.html.markdown | 8 -------- website/docs/d/cloud_backup_schedule.html.markdown | 8 -------- website/docs/d/cloud_backup_snapshot.html.markdown | 8 -------- .../cloud_backup_snapshot_export_bucket.html.markdown | 9 +-------- ...cloud_backup_snapshot_export_buckets.html.markdown | 9 +-------- .../d/cloud_backup_snapshot_export_job.html.markdown | 9 +-------- .../d/cloud_backup_snapshot_export_jobs.html.markdown | 9 +-------- .../d/cloud_backup_snapshot_restore_job.html.markdown | 8 -------- .../cloud_backup_snapshot_restore_jobs.html.markdown | 8 -------- website/docs/d/cloud_backup_snapshots.html.markdown | 8 -------- website/docs/d/cloud_provider_access_setup.markdown | 8 -------- ...oud_provider_shared_tier_restore_job.html.markdown | 9 --------- ...ud_provider_shared_tier_restore_jobs.html.markdown | 8 -------- .../cloud_provider_shared_tier_snapshot.html.markdown | 8 -------- ...cloud_provider_shared_tier_snapshots.html.markdown | 8 -------- website/docs/d/cloud_provider_snapshot.html.markdown | 6 +----- ...loud_provider_snapshot_backup_policy.html.markdown | 7 +------ .../cloud_provider_snapshot_restore_job.html.markdown | 6 +----- ...cloud_provider_snapshot_restore_jobs.html.markdown | 
6 +----- website/docs/d/cloud_provider_snapshots.html.markdown | 6 +----- website/docs/d/cluster.html.markdown | 8 -------- .../docs/d/cluster_outage_simulation.html.markdown | 8 -------- website/docs/d/clusters.html.markdown | 8 -------- .../docs/d/control_plane_ip_addresses.html.markdown | 11 +---------- website/docs/d/custom_db_role.html.markdown | 10 +--------- website/docs/d/custom_db_roles.html.markdown | 10 +--------- ...custom_dns_configuration_cluster_aws.html.markdown | 8 -------- website/docs/d/data_lake_pipeline.html.markdown | 8 -------- website/docs/d/data_lake_pipeline_run.html.markdown | 10 +--------- website/docs/d/data_lake_pipeline_runs.html.markdown | 10 +--------- website/docs/d/data_lake_pipelines.html.markdown | 8 -------- website/docs/d/database_user.html.markdown | 10 +--------- website/docs/d/database_users.html.markdown | 10 +--------- website/docs/d/event_trigger.html.markdown | 10 +--------- website/docs/d/event_triggers.html.markdown | 10 +--------- .../docs/d/federated_database_instance.html.markdown | 10 +--------- .../docs/d/federated_database_instances.html.markdown | 8 -------- website/docs/d/federated_query_limit.html.markdown | 10 +--------- website/docs/d/federated_query_limits.html.markdown | 8 -------- website/docs/d/federated_settings.html.markdown | 8 -------- ...federated_settings_identity_provider.html.markdown | 8 -------- ...ederated_settings_identity_providers.html.markdown | 8 -------- .../d/federated_settings_org_config.html.markdown | 8 -------- .../d/federated_settings_org_configs.html.markdown | 8 -------- .../federated_settings_org_role_mapping.html.markdown | 8 -------- ...federated_settings_org_role_mappings.html.markdown | 8 -------- website/docs/d/global_cluster_config.html.markdown | 8 -------- website/docs/d/ldap_configuration.html.markdown | 8 -------- website/docs/d/ldap_verify.html.markdown | 8 -------- website/docs/d/maintenance_window.html.markdown | 8 -------- 
website/docs/d/network_container.html.markdown | 8 -------- website/docs/d/network_containers.html.markdown | 8 -------- website/docs/d/network_peering.html.markdown | 8 -------- website/docs/d/network_peerings.html.markdown | 8 -------- website/docs/d/online_archive.html.markdown | 8 -------- website/docs/d/online_archives.html.markdown | 8 -------- website/docs/d/org_invitation.html.markdown | 8 -------- website/docs/d/organization.html.markdown | 10 +--------- website/docs/d/organizations.html.markdown | 10 +--------- .../d/private_endpoint_regional_mode.html.markdown | 10 +--------- website/docs/d/privatelink_endpoint.html.markdown | 10 +--------- .../docs/d/privatelink_endpoint_service.html.markdown | 10 +--------- ...rvice_data_federation_online_archive.html.markdown | 8 -------- ...vice_data_federation_online_archives.html.markdown | 8 -------- ...vatelink_endpoint_service_serverless.html.markdown | 11 +---------- .../d/privatelink_endpoints_service_adl.html.markdown | 10 +--------- ...atelink_endpoints_service_serverless.html.markdown | 11 +---------- website/docs/d/project.html.markdown | 8 -------- website/docs/d/project_api_key.html.markdown | 8 -------- website/docs/d/project_api_keys.html.markdown | 10 +--------- website/docs/d/project_invitation.html.markdown | 8 -------- website/docs/d/project_ip_access_list.html.markdown | 8 -------- website/docs/d/projects.html.markdown | 10 +--------- website/docs/d/push_based_log_export.html.markdown | 9 --------- website/docs/d/roles_org_id.html.markdown | 8 -------- website/docs/d/search_deployment.html.markdown | 9 --------- website/docs/d/search_index.html.markdown | 10 +--------- website/docs/d/search_indexes.html.markdown | 10 +--------- website/docs/d/serverless_instance.html.markdown | 10 +--------- website/docs/d/serverless_instances.html.markdown | 10 +--------- website/docs/d/stream_connection.html.markdown | 8 -------- website/docs/d/stream_connections.html.markdown | 8 -------- 
website/docs/d/stream_instance.html.markdown | 8 -------- website/docs/d/stream_instances.html.markdown | 8 -------- website/docs/d/team.html.markdown | 8 -------- website/docs/d/teams.html.markdown | 8 +++----- website/docs/d/third_party_integration.markdown | 10 +--------- website/docs/d/third_party_integrations.markdown | 10 +--------- .../d/x509_authentication_database_user.html.markdown | 10 +--------- website/docs/index.html.markdown | 8 -------- website/docs/r/access_list_api_key.html.markdown | 8 -------- website/docs/r/advanced_cluster.html.markdown | 8 -------- website/docs/r/alert_configuration.html.markdown | 8 -------- website/docs/r/api_key.html.markdown | 8 -------- website/docs/r/auditing.html.markdown | 8 -------- website/docs/r/backup_compliance_policy.html.markdown | 7 ------- website/docs/r/cloud_backup_schedule.html.markdown | 8 -------- website/docs/r/cloud_backup_snapshot.html.markdown | 8 -------- .../cloud_backup_snapshot_export_bucket.html.markdown | 11 ++--------- .../r/cloud_backup_snapshot_export_job.html.markdown | 11 ++--------- .../r/cloud_backup_snapshot_restore_job.html.markdown | 8 -------- website/docs/r/cloud_provider_access.markdown | 8 -------- website/docs/r/cloud_provider_snapshot.html.markdown | 6 +----- ...loud_provider_snapshot_backup_policy.html.markdown | 6 +----- .../cloud_provider_snapshot_restore_job.html.markdown | 6 +----- website/docs/r/cluster.html.markdown | 8 -------- .../docs/r/cluster_outage_simulation.html.markdown | 8 -------- website/docs/r/custom_db_role.html.markdown | 8 -------- .../r/custom_dns_configuration_cluster_aws.markdown | 8 -------- website/docs/r/data_lake_pipeline.html.markdown | 8 -------- website/docs/r/database_user.html.markdown | 8 -------- website/docs/r/encryption_at_rest.html.markdown | 10 +--------- website/docs/r/event_trigger.html.markdown | 8 -------- .../docs/r/federated_database_instance.html.markdown | 8 -------- website/docs/r/federated_query_limit.html.markdown | 8 -------- 
...federated_settings_identity_provider.html.markdown | 8 -------- .../r/federated_settings_org_config.html.markdown | 9 --------- .../federated_settings_org_role_mapping.html.markdown | 8 -------- website/docs/r/global_cluster_config.html.markdown | 9 --------- website/docs/r/ldap_configuration.html.markdown | 8 -------- website/docs/r/ldap_verify.html.markdown | 8 -------- website/docs/r/maintenance_window.html.markdown | 8 -------- website/docs/r/network_container.html.markdown | 8 -------- website/docs/r/network_peering.html.markdown | 8 -------- website/docs/r/online_archive.html.markdown | 8 -------- website/docs/r/org_invitation.html.markdown | 8 -------- website/docs/r/organization.html.markdown | 8 -------- .../r/private_endpoint_regional_mode.html.markdown | 8 -------- website/docs/r/privatelink_endpoint.html.markdown | 8 -------- .../r/privatelink_endpoint_serverless.html.markdown | 9 --------- .../docs/r/privatelink_endpoint_service.html.markdown | 8 -------- ...rvice_data_federation_online_archive.html.markdown | 8 -------- ...vatelink_endpoint_service_serverless.html.markdown | 9 --------- website/docs/r/project.html.markdown | 8 -------- website/docs/r/project_api_key.html.markdown | 8 -------- website/docs/r/project_invitation.html.markdown | 8 -------- website/docs/r/project_ip_access_list.html.markdown | 8 -------- website/docs/r/push_based_log_export.html.markdown | 9 --------- website/docs/r/search_deployment.html.markdown | 9 --------- website/docs/r/search_index.html.markdown | 8 -------- website/docs/r/serverless_instance.html.markdown | 8 -------- website/docs/r/stream_connection.html.markdown | 8 -------- website/docs/r/stream_instance.html.markdown | 8 -------- website/docs/r/team.html.markdown | 8 -------- website/docs/r/teams.html.markdown | 8 +++----- website/docs/r/third_party_integration.markdown | 8 -------- .../r/x509_authentication_database_user.html.markdown | 8 -------- website/docs/troubleshooting.html.markdown | 8 -------- 166 
files changed, 51 insertions(+), 1340 deletions(-) diff --git a/templates/data-source.md.tmpl b/templates/data-source.md.tmpl index 233a276c54..15038928c2 100644 --- a/templates/data-source.md.tmpl +++ b/templates/data-source.md.tmpl @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: {{ if .Name }}"MongoDB Atlas: {{.Name}}"{{ end }} -sidebar_current: {{ if .Type }}"docs-{{ .ProviderShortName }}-{{ $arr := split .Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}"{{ end }} -description: |- - {{ if ne .Name "" }}"Provides a {{ .Name }} data source."{{ end }} ---- - # {{ if .Name }}{{.Type}}: {{.Name}}{{ end }} {{ if .Description }} {{ .Description | trimspace }} {{ end }} diff --git a/templates/data-sources/control_plane_ip_addresses.md.tmpl b/templates/data-sources/control_plane_ip_addresses.md.tmpl index 35f2ceed24..c1e6d6dc51 100644 --- a/templates/data-sources/control_plane_ip_addresses.md.tmpl +++ b/templates/data-sources/control_plane_ip_addresses.md.tmpl @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: {{.Name}}" -sidebar_current: "docs-{{ .ProviderShortName }}-{{ $arr := split .Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}" -description: |- - "Provides a data source that returns all control plane IP addresses" ---- - # {{.Type}}: {{.Name}} {{ .Description | trimspace }} diff --git a/templates/data-sources/push_based_log_export.md.tmpl b/templates/data-sources/push_based_log_export.md.tmpl index 59e0bbffdf..03255fcd68 100644 --- a/templates/data-sources/push_based_log_export.md.tmpl +++ b/templates/data-sources/push_based_log_export.md.tmpl @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: {{.Name}}" -sidebar_current: "docs-{{ .ProviderShortName }}-{{ $arr := split 
.Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}" -description: |- - "Provides a data source for push-based log export feature." ---- - # {{.Type}}: {{.Name}} {{ .Description | trimspace }} diff --git a/templates/data-sources/search_deployment.md.tmpl b/templates/data-sources/search_deployment.md.tmpl index 228acf91d4..b20f0829e6 100644 --- a/templates/data-sources/search_deployment.md.tmpl +++ b/templates/data-sources/search_deployment.md.tmpl @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: {{.Name}}" -sidebar_current: "docs-{{ .ProviderShortName }}-{{ $arr := split .Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}" -description: |- - "Provides a Search Deployment data source." ---- - # {{.Type}}: {{.Name}} {{ .Description | trimspace }} diff --git a/templates/resources.md.tmpl b/templates/resources.md.tmpl index 6951cac4b1..d81c9cfb75 100644 --- a/templates/resources.md.tmpl +++ b/templates/resources.md.tmpl @@ -1,10 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: {{ if .Name }}"MongoDB Atlas: {{.Name}}{{ end }}" -sidebar_current: {{ if .Type }}"docs-{{ .ProviderShortName }}-{{ $arr := split .Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}"{{ end }} -description: |- - {{ if .Name }}"Provides a {{ .Name }} resource."{{ end }} ---- #{{ if .Name }} {{.Type}}: {{.Name}}{{ end }} {{ if .Name }}{{ .Description | trimspace }}{{ end }} diff --git a/templates/resources/push_based_log_export.md.tmpl b/templates/resources/push_based_log_export.md.tmpl index a12f730a72..ad2634f582 100644 --- a/templates/resources/push_based_log_export.md.tmpl +++ b/templates/resources/push_based_log_export.md.tmpl 
@@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: {{.Name}}" -sidebar_current: "docs-{{ .ProviderShortName }}-{{ $arr := split .Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}" -description: |- - "Provides resource for push-based log export feature." ---- - # {{.Type}}: {{.Name}} {{ .Description | trimspace }} diff --git a/templates/resources/search_deployment.md.tmpl b/templates/resources/search_deployment.md.tmpl index f7aaa97efa..1e503a8982 100644 --- a/templates/resources/search_deployment.md.tmpl +++ b/templates/resources/search_deployment.md.tmpl @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: {{.Name}}" -sidebar_current: "docs-{{ .ProviderShortName }}-{{ $arr := split .Type " "}}{{ range $element := $arr }}{{ $element | lower}}{{ end }}{{ $name := slice (split .Name "_") 1 }}{{ range $element := $name }}-{{ $element | lower}}{{end}}" -description: |- - "Provides a Search Deployment resource." ---- - # {{.Type}}: {{.Name}} {{ .Description | trimspace }} diff --git a/website/docs/d/access_list_api_key.html.markdown b/website/docs/d/access_list_api_key.html.markdown index 2d3f664050..3ed0e63db0 100644 --- a/website/docs/d/access_list_api_key.html.markdown +++ b/website/docs/d/access_list_api_key.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: access_list_api_key" -sidebar_current: "docs-mongodbatlas-datasource-access-list-api-key" -description: |- - Displays the access list entries for the specified Atlas Organization API Key. ---- - # Data Source: mongodbatlas_access_list_api_key `mongodbatlas_access_list_api_key` describes an Access List API Key entry resource. The access list grants access from IPs, CIDRs) to clusters within the Project. 
diff --git a/website/docs/d/access_list_api_keys.html.markdown b/website/docs/d/access_list_api_keys.html.markdown index 06d601ac8f..4a204f9126 100644 --- a/website/docs/d/access_list_api_keys.html.markdown +++ b/website/docs/d/access_list_api_keys.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: access_list_api_keys" -sidebar_current: "docs-mongodbatlas-datasource-access-list-api-keys" -description: |- - Displays the access list entries for the specified Atlas Organization API Key. Atlas resources require that all API requests originate from IP addresses on the API access list. ---- - # Data Source: mongodbatlas_access_list_api_key `mongodbatlas_access_list_api_keys` describes an Access List API Key entry resource. The access list grants access from IPs, CIDRs) to clusters within the Project. diff --git a/website/docs/d/advanced_cluster.html.markdown b/website/docs/d/advanced_cluster.html.markdown index 994b8a6720..73c63dc8a5 100644 --- a/website/docs/d/advanced_cluster.html.markdown +++ b/website/docs/d/advanced_cluster.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: advanced_cluster" -sidebar_current: "docs-mongodbatlas-datasource-advanced-cluster" -description: |- - Describe an Advanced Cluster. ---- - # Data Source: mongodbatlas_advanced_cluster `mongodbatlas_advanced_cluster` describes an Advanced Cluster. The data source requires your Project ID. diff --git a/website/docs/d/advanced_clusters.html.markdown b/website/docs/d/advanced_clusters.html.markdown index 83d2d99c98..31c1c84479 100644 --- a/website/docs/d/advanced_clusters.html.markdown +++ b/website/docs/d/advanced_clusters.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cluster" -sidebar_current: "docs-mongodbatlas-datasource-clusters" -description: |- - Describe all Advanced Clusters in Project. 
---- - # Data Source: mongodbatlas_clusters `mongodbatlas_cluster` describes all Advanced Clusters by the provided project_id. The data source requires your Project ID. diff --git a/website/docs/d/alert_configuration.html.markdown b/website/docs/d/alert_configuration.html.markdown index 5672d173a2..5457e56f36 100644 --- a/website/docs/d/alert_configuration.html.markdown +++ b/website/docs/d/alert_configuration.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: alert_configuration" -sidebar_current: "docs-mongodbatlas-datasource-alert-configuration" -description: |- - Describes a Alert Configuration. ---- - # Data Source: mongodbatlas_alert_configuration `mongodbatlas_alert_configuration` describes an Alert Configuration. diff --git a/website/docs/d/alert_configurations.html.markdown b/website/docs/d/alert_configurations.html.markdown index 6f835179df..9169a46a7d 100644 --- a/website/docs/d/alert_configurations.html.markdown +++ b/website/docs/d/alert_configurations.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: Alert Configurations" -sidebar_current: "docs-mongodbatlas-datasource-alert-configurations" -description: |- - Describe all Alert Configurations in Project. ---- - # Data Source: mongodbatlas_alert_configurations `mongodbatlas_alert_configurations` describes all Alert Configurations by the provided project_id. The data source requires your Project ID. diff --git a/website/docs/d/api_key.html.markdown b/website/docs/d/api_key.html.markdown index bad605b861..a7db3055cf 100644 --- a/website/docs/d/api_key.html.markdown +++ b/website/docs/d/api_key.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: api_key" -sidebar_current: "docs-mongodbatlas-datasource-api-key" -description: |- - Describes a API Key. ---- - # Data Source: mongodbatlas_api_key `mongodbatlas_api_key` describes a MongoDB Atlas API Key. 
This represents a API Key that has been created. diff --git a/website/docs/d/api_keys.html.markdown b/website/docs/d/api_keys.html.markdown index 585f99d5e7..a1d6c28f84 100644 --- a/website/docs/d/api_keys.html.markdown +++ b/website/docs/d/api_keys.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: api_keys" -sidebar_current: "docs-mongodbatlas-api-keys" -description: |- - Describes a API Keys. ---- - # Data Source: mongodbatlas_api_keys `mongodbatlas_api_keys` describe all API Keys. This represents API Keys that have been created. diff --git a/website/docs/d/atlas_user.html.markdown b/website/docs/d/atlas_user.html.markdown index a9a6afca68..8216f65ca6 100644 --- a/website/docs/d/atlas_user.html.markdown +++ b/website/docs/d/atlas_user.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: atlas_user" -sidebar_current: "docs-mongodbatlas-datasource-atlas-user" -description: |- - Provides a Atlas User Datasource. ---- - # Data Source: mongodbatlas_atlas_user `mongodbatlas_atlas_user` Provides a MongoDB Atlas User. diff --git a/website/docs/d/atlas_users.html.markdown b/website/docs/d/atlas_users.html.markdown index 60640fc1fb..2ad0490c18 100644 --- a/website/docs/d/atlas_users.html.markdown +++ b/website/docs/d/atlas_users.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: atlas_users" -sidebar_current: "docs-mongodbatlas-datasource-mongodbatlas-atlas_users" -description: |- - Provides a Atlas Users Datasource. ---- - # Data Source: atlas_users `atlas_users` provides Atlas Users associated with a specified Organization, Project, or Team. 
diff --git a/website/docs/d/auditing.html.markdown b/website/docs/d/auditing.html.markdown index 73578518d1..1f52a9f787 100644 --- a/website/docs/d/auditing.html.markdown +++ b/website/docs/d/auditing.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: auditing" -sidebar_current: "docs-mongodbatlas-datasource-auditing" -description: |- - Describes a Auditing. ---- - # Data Source: mongodbatlas_auditing `mongodbatlas_auditing` describes a Auditing. diff --git a/website/docs/d/backup_compliance_policy.html.markdown b/website/docs/d/backup_compliance_policy.html.markdown index eff08746d8..abcc0a1687 100644 --- a/website/docs/d/backup_compliance_policy.html.markdown +++ b/website/docs/d/backup_compliance_policy.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: backup_compliance_policy" -sidebar_current: "docs-mongodbatlas-datasource-backup-compliance-policy" -description: |- - Provides a Backup Compliance Policy Datasource. ---- - # Data Source: mongodbatlas_backup_compliance_policy `mongodbatlas_backup_compliance_policy` provides an Atlas Backup Compliance Policy. An Atlas Backup Compliance Policy contains the current protection policy settings for a project. A compliance policy prevents any user, regardless of role, from modifying or deleting specific cluster configurations and backups. To disable a Backup Compliance Policy, you must contact MongoDB support. Backup Compliance Policies are only supported for clusters M10 and higher and are applied as the minimum policy for all clusters. 
diff --git a/website/docs/d/cloud_backup_schedule.html.markdown b/website/docs/d/cloud_backup_schedule.html.markdown index 7beb7250e5..a3bca33bc8 100644 --- a/website/docs/d/cloud_backup_schedule.html.markdown +++ b/website/docs/d/cloud_backup_schedule.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_schedule" -sidebar_current: "docs-mongodbatlas-datasource-cloud-backup-schedule" -description: |- - Provides a Cloud Backup Schedule Datasource. ---- - # Data Source: mongodbatlas_cloud_backup_schedule `mongodbatlas_cloud_backup_schedule` provides a Cloud Backup Schedule datasource. An Atlas Cloud Backup Schedule provides the current cloud backup schedule for the cluster. diff --git a/website/docs/d/cloud_backup_snapshot.html.markdown b/website/docs/d/cloud_backup_snapshot.html.markdown index 79bf56bc11..80297fafc7 100644 --- a/website/docs/d/cloud_backup_snapshot.html.markdown +++ b/website/docs/d/cloud_backup_snapshot.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot" -description: |- - Provides a Cloud Backup Snapshot Datasource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot `mongodbatlas_cloud_backup_snapshot` provides an Cloud Backup Snapshot datasource. Atlas Cloud Backup Snapshots provide localized backup storage using the native snapshot functionality of the cluster’s cloud service. 
diff --git a/website/docs/d/cloud_backup_snapshot_export_bucket.html.markdown b/website/docs/d/cloud_backup_snapshot_export_bucket.html.markdown index 0183ef0952..a715db503b 100644 --- a/website/docs/d/cloud_backup_snapshot_export_bucket.html.markdown +++ b/website/docs/d/cloud_backup_snapshot_export_bucket.html.markdown @@ -1,12 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_export_bucket" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot_export_bucket" -description: |- - Provides a Cloud Backup Snapshot Export Bucket resource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot_export_bucket + `mongodbatlas_cloud_backup_snapshot_export_bucket` datasource allows you to retrieve all the buckets for the specified project. diff --git a/website/docs/d/cloud_backup_snapshot_export_buckets.html.markdown b/website/docs/d/cloud_backup_snapshot_export_buckets.html.markdown index 54c6be8e4b..d57e565439 100644 --- a/website/docs/d/cloud_backup_snapshot_export_buckets.html.markdown +++ b/website/docs/d/cloud_backup_snapshot_export_buckets.html.markdown @@ -1,12 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_export_buckets" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot_export_buckets" -description: |- -Provides a Cloud Backup Snapshot Export Bucket resource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot_export_buckets + `mongodbatlas_cloud_backup_snapshot_export_buckets` datasource allows you to retrieve all the buckets for the specified project. 
diff --git a/website/docs/d/cloud_backup_snapshot_export_job.html.markdown b/website/docs/d/cloud_backup_snapshot_export_job.html.markdown index 29afeb796c..6307ef5a10 100644 --- a/website/docs/d/cloud_backup_snapshot_export_job.html.markdown +++ b/website/docs/d/cloud_backup_snapshot_export_job.html.markdown @@ -1,12 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_export_job" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot_export_job" -description: |- - Provides a Cloud Backup Snapshot Export Job resource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot_export_Job + `mongodbatlas_cloud_backup_snapshot_export_job` datasource allows you to retrieve a snapshot export job for the specified project and cluster. diff --git a/website/docs/d/cloud_backup_snapshot_export_jobs.html.markdown b/website/docs/d/cloud_backup_snapshot_export_jobs.html.markdown index 6cda1a7c90..5ffb6a7a07 100644 --- a/website/docs/d/cloud_backup_snapshot_export_jobs.html.markdown +++ b/website/docs/d/cloud_backup_snapshot_export_jobs.html.markdown @@ -1,12 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_export_jobs" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot_export_jobs" -description: |- - Provides a Cloud Backup Snapshot Export Jobs resource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot_export_jobs + `mongodbatlas_cloud_backup_snapshot_export_jobs` datasource allows you to retrieve all the buckets for the specified project. 
diff --git a/website/docs/d/cloud_backup_snapshot_restore_job.html.markdown b/website/docs/d/cloud_backup_snapshot_restore_job.html.markdown index 856ceb0f50..909d83aee8 100644 --- a/website/docs/d/cloud_backup_snapshot_restore_job.html.markdown +++ b/website/docs/d/cloud_backup_snapshot_restore_job.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_restore_job" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot_restore_job" -description: |- - Provides a Cloud Backup Snapshot Restore Job Datasource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot_restore_job `mongodbatlas_cloud_backup_snapshot_restore_job` provides a Cloud Backup Snapshot Restore Job datasource. Gets all the cloud backup snapshot restore jobs for the specified cluster. diff --git a/website/docs/d/cloud_backup_snapshot_restore_jobs.html.markdown b/website/docs/d/cloud_backup_snapshot_restore_jobs.html.markdown index e00b51ba16..df3c181fd1 100644 --- a/website/docs/d/cloud_backup_snapshot_restore_jobs.html.markdown +++ b/website/docs/d/cloud_backup_snapshot_restore_jobs.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_restore_jobs" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshot_restore_jobs" -description: |- - Provides a Cloud Backup Snapshot Restore Jobs Datasource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshot_restore_jobs `mongodbatlas_cloud_backup_snapshot_restore_jobs` provides a Cloud Backup Snapshot Restore Jobs datasource. Gets all the cloud backup snapshot restore jobs for the specified cluster. 
diff --git a/website/docs/d/cloud_backup_snapshots.html.markdown b/website/docs/d/cloud_backup_snapshots.html.markdown index efe8036856..fd3e2e0e21 100644 --- a/website/docs/d/cloud_backup_snapshots.html.markdown +++ b/website/docs/d/cloud_backup_snapshots.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshots" -sidebar_current: "docs-mongodbatlas-datasource-cloud_backup_snapshots" -description: |- - Provides an Cloud Backup Snapshot Datasource. ---- - # Data Source: mongodbatlas_cloud_backup_snapshots `mongodbatlas_cloud_backup_snapshots` provides an Cloud Backup Snapshot datasource. Atlas Cloud Backup Snapshots provide localized backup storage using the native snapshot functionality of the cluster’s cloud service. diff --git a/website/docs/d/cloud_provider_access_setup.markdown b/website/docs/d/cloud_provider_access_setup.markdown index f4cd0a4c40..03f0cc315a 100644 --- a/website/docs/d/cloud_provider_access_setup.markdown +++ b/website/docs/d/cloud_provider_access_setup.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_cloud_provider_access_setup" -sidebar_current: "docs-mongodbatlas-datasource-cloud-provider-access-setup" -description: |- - Allows you to get the a single role for cloud provider access setup ---- - # Data Source: mongodbatlas_cloud_provider_access_setup `mongodbatlas_cloud_provider_access_setup` allows you to get a single role for a provider access role setup, currently only AWS and Azure are supported. 
diff --git a/website/docs/d/cloud_provider_shared_tier_restore_job.html.markdown b/website/docs/d/cloud_provider_shared_tier_restore_job.html.markdown index 6f1797da35..d784acbedb 100644 --- a/website/docs/d/cloud_provider_shared_tier_restore_job.html.markdown +++ b/website/docs/d/cloud_provider_shared_tier_restore_job.html.markdown @@ -1,12 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_shared_tier_restore_job" -sidebar_current: "docs-mongodbatlas-datasource-mongodbatlas-shared-tier-restore-jobs" -description: |- - Provides a Cloud Backup Shared Tier Snapshot Restore Job Datasource. ---- - - # Data Source: mongodbatlas_shared_tier_restore_job `mongodbatlas_shared_tier_restore_job` provides a Cloud Backup Snapshot Restore Job data source for Shared Tier Clusters. Gets the cloud backup snapshot restore jobs for the specified shared tier cluster. diff --git a/website/docs/d/cloud_provider_shared_tier_restore_jobs.html.markdown b/website/docs/d/cloud_provider_shared_tier_restore_jobs.html.markdown index e899612683..b90b9f0762 100644 --- a/website/docs/d/cloud_provider_shared_tier_restore_jobs.html.markdown +++ b/website/docs/d/cloud_provider_shared_tier_restore_jobs.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_shared_tier_restore_job" -sidebar_current: "docs-mongodbatlas-datasource-mongodbatlas-shared-tier-restore-job" -description: |- - Provides a Cloud Backup Shared Tier Snapshot Restore Jobs Datasource. ---- - # Data Source: mongodbatlas_shared_tier_restore_jobs `mongodbatlas_shared_tier_restore_jobs` provides Cloud Backup Snapshot Restore Jobs data source for Shared Tier Clusters. Gets all the cloud backup snapshot restore jobs for the specified shared tier cluster. 
diff --git a/website/docs/d/cloud_provider_shared_tier_snapshot.html.markdown b/website/docs/d/cloud_provider_shared_tier_snapshot.html.markdown index 72b66b8ee9..76d7855855 100644 --- a/website/docs/d/cloud_provider_shared_tier_snapshot.html.markdown +++ b/website/docs/d/cloud_provider_shared_tier_snapshot.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_shared_tier_snapshot" -sidebar_current: "docs-mongodbatlas-datasource-cloud_provider_shared_tier_snapshot" -description: |- - Provides an Cloud Backup Snapshot Datasource for Shared Tier Clusters. ---- - # Data Source: mongodbatlas_shared_tier_snapshot `mongodbatlas_shared_tier_snapshot` provides an Cloud Backup Snapshot data source for Shared Tier Clusters. Atlas Cloud Backup Snapshots provide localized backup storage using the native snapshot functionality of the cluster’s cloud service. diff --git a/website/docs/d/cloud_provider_shared_tier_snapshots.html.markdown b/website/docs/d/cloud_provider_shared_tier_snapshots.html.markdown index 15d2e86c02..12f398916d 100644 --- a/website/docs/d/cloud_provider_shared_tier_snapshots.html.markdown +++ b/website/docs/d/cloud_provider_shared_tier_snapshots.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_shared_tier_snapshots" -sidebar_current: "docs-mongodbatlas-datasource-cloud_provider_shared_tier_snapshots" -description: |- - Provides a Cloud Backup Snapshots Datasource for Shared Tier Clusters. ---- - # Data Source: mongodbatlas_shared_tier_snapshots `mongodbatlas_shared_tier_snapshots` provides an Cloud Backup Snapshots data source for Shared Tier Clusters. Atlas Cloud Backup Snapshots provide localized backup storage using the native snapshot functionality of the cluster’s cloud service. 
diff --git a/website/docs/d/cloud_provider_snapshot.html.markdown b/website/docs/d/cloud_provider_snapshot.html.markdown index d89c19a654..c164eb95bc 100644 --- a/website/docs/d/cloud_provider_snapshot.html.markdown +++ b/website/docs/d/cloud_provider_snapshot.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot" -sidebar_current: "docs-mongodbatlas-datasource-cloud_provider_snapshot" -description: |- - Provides an Cloud Backup Snapshot Datasource. +subcategory: "Deprecated" --- **WARNING:** This datasource is deprecated, use `mongodbatlas_cloud_backup_snapshot` diff --git a/website/docs/d/cloud_provider_snapshot_backup_policy.html.markdown b/website/docs/d/cloud_provider_snapshot_backup_policy.html.markdown index 0a1b97fcf8..d06361850a 100644 --- a/website/docs/d/cloud_provider_snapshot_backup_policy.html.markdown +++ b/website/docs/d/cloud_provider_snapshot_backup_policy.html.markdown @@ -1,12 +1,7 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot_backup_policy" -sidebar_current: "docs-mongodbatlas-datasource-cloud-provider-snapshot-backup-policy" -description: |- - Provides a Cloud Backup Snapshot Policy Datasource. 
+subcategory: "Deprecated" --- - **WARNING:** This data source is deprecated, use `mongodbatlas_cloud_backup_schedule` **Note:** This resource have now been fully deprecated as part of v1.10.0 release diff --git a/website/docs/d/cloud_provider_snapshot_restore_job.html.markdown b/website/docs/d/cloud_provider_snapshot_restore_job.html.markdown index 0dcdf06cb7..8b58423e69 100644 --- a/website/docs/d/cloud_provider_snapshot_restore_job.html.markdown +++ b/website/docs/d/cloud_provider_snapshot_restore_job.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot_restore_job" -sidebar_current: "docs-mongodbatlas-datasource-cloud_provider_snapshot_restore_job" -description: |- - Provides a Cloud Backup Snapshot Restore Job Datasource. +subcategory: "Deprecated" --- **WARNING:** This datasource is deprecated, use `mongodbatlas_cloud_backup_snapshot_restore_job` diff --git a/website/docs/d/cloud_provider_snapshot_restore_jobs.html.markdown b/website/docs/d/cloud_provider_snapshot_restore_jobs.html.markdown index 5ea2e2e223..d6d46c82eb 100644 --- a/website/docs/d/cloud_provider_snapshot_restore_jobs.html.markdown +++ b/website/docs/d/cloud_provider_snapshot_restore_jobs.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot_restore_jobs" -sidebar_current: "docs-mongodbatlas-datasource-cloud_provider_snapshot_restore_jobs" -description: |- - Provides a Cloud Backup Snapshot Restore Jobs Datasource. 
+subcategory: "Deprecated" --- **WARNING:** This datasource is deprecated, use `mongodbatlas_cloud_backup_snapshots_restore_jobs` diff --git a/website/docs/d/cloud_provider_snapshots.html.markdown b/website/docs/d/cloud_provider_snapshots.html.markdown index 3dbe56d565..7c8a63773d 100644 --- a/website/docs/d/cloud_provider_snapshots.html.markdown +++ b/website/docs/d/cloud_provider_snapshots.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshots" -sidebar_current: "docs-mongodbatlas-datasource-cloud_provider_snapshots" -description: |- - Provides an Cloud Backup Snapshot Datasource. +subcategory: "Deprecated" --- **WARNING:** This datasource is deprecated, use `mongodbatlas_cloud_backup_snapshots` diff --git a/website/docs/d/cluster.html.markdown b/website/docs/d/cluster.html.markdown index 3ffbaf42c0..4aa219be3d 100644 --- a/website/docs/d/cluster.html.markdown +++ b/website/docs/d/cluster.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cluster" -sidebar_current: "docs-mongodbatlas-datasource-cluster" -description: |- - Describe a Cluster. ---- - # Data Source: mongodbatlas_cluster `mongodbatlas_cluster` describes a Cluster. The data source requires your Project ID. diff --git a/website/docs/d/cluster_outage_simulation.html.markdown b/website/docs/d/cluster_outage_simulation.html.markdown index a38aebb36e..090e5cb891 100644 --- a/website/docs/d/cluster_outage_simulation.html.markdown +++ b/website/docs/d/cluster_outage_simulation.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cluster_outage_simulation" -sidebar_current: "docs-mongodbatlas-resource-federated-database-instance" -description: |- - Provides a Cluster Outage Simulation resource. ---- - # Data Source: mongodbatlas_cluster_outage_simulation `mongodbatlas_cluster_outage_simulation` provides a Cluster Outage Simulation resource. 
For more details see https://www.mongodb.com/docs/atlas/tutorial/test-resilience/simulate-regional-outage/ diff --git a/website/docs/d/clusters.html.markdown b/website/docs/d/clusters.html.markdown index a7597fbc88..b37cff038b 100644 --- a/website/docs/d/clusters.html.markdown +++ b/website/docs/d/clusters.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cluster" -sidebar_current: "docs-mongodbatlas-datasource-clusters" -description: |- - Describe all Clusters in Project. ---- - # Data Source: mongodbatlas_clusters `mongodbatlas_cluster` describes all Clusters by the provided project_id. The data source requires your Project ID. diff --git a/website/docs/d/control_plane_ip_addresses.html.markdown b/website/docs/d/control_plane_ip_addresses.html.markdown index 79145c4afb..7da3d4b5a3 100644 --- a/website/docs/d/control_plane_ip_addresses.html.markdown +++ b/website/docs/d/control_plane_ip_addresses.html.markdown @@ -1,15 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_control_plane_ip_addresses" -sidebar_current: "docs-mongodbatlas-datasource-control-plane-ip-addresses" -description: |- - "Provides a data source that returns all control plane IP addresses" ---- - # Data Source: mongodbatlas_control_plane_ip_addresses - -Provides a data source that returns all control plane IP addresses. +`mongodbatlas_control_plane_ip_addresses` provides a data source that returns all control plane IP addresses. ## Example Usages ```terraform diff --git a/website/docs/d/custom_db_role.html.markdown b/website/docs/d/custom_db_role.html.markdown index 9338ed0175..af92937e4c 100644 --- a/website/docs/d/custom_db_role.html.markdown +++ b/website/docs/d/custom_db_role.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: custom_db_role" -sidebar_current: "docs-mongodbatlas-datasource-custom-db-role" -description: |- - Describes a Custom DB Role. 
---- - # Data Source: mongodbatlas_custom_db_role -`mongodbatlas_custom_db_role` describe a Custom DB Role. This represents a custom db role. +`mongodbatlas_custom_db_role` describes a Custom DB Role. This represents a custom db role. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/d/custom_db_roles.html.markdown b/website/docs/d/custom_db_roles.html.markdown index 0fb071fef6..e12515cb1e 100644 --- a/website/docs/d/custom_db_roles.html.markdown +++ b/website/docs/d/custom_db_roles.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: custom_db_roles" -sidebar_current: "docs-mongodbatlas-datasource-custom-db-roles" -description: |- - Describes a Custom DB Roles. ---- - # Data Source: mongodbatlas_custom_db_roles -`mongodbatlas_custom_db_roles` describe all Custom DB Roles. This represents a custom db roles. +`mongodbatlas_custom_db_roles` describes all Custom DB Roles. This represents a custom db roles. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/d/custom_dns_configuration_cluster_aws.html.markdown b/website/docs/d/custom_dns_configuration_cluster_aws.html.markdown index 40afdf12a0..f1b127a2af 100644 --- a/website/docs/d/custom_dns_configuration_cluster_aws.html.markdown +++ b/website/docs/d/custom_dns_configuration_cluster_aws.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: custom_dns_configuration_cluster_aws" -sidebar_current: "docs-mongodbatlas-datasource-custom_dns_configuration_cluster_aws" -description: |- - Describes a Custom DNS Configuration for Atlas Clusters on AWS. ---- - # Data Source: mongodbatlas_custom_dns_configuration_cluster_aws `mongodbatlas_custom_dns_configuration_cluster_aws` describes a Custom DNS Configuration for Atlas Clusters on AWS. 
diff --git a/website/docs/d/data_lake_pipeline.html.markdown b/website/docs/d/data_lake_pipeline.html.markdown index 892ed566fe..3e4d8aa410 100644 --- a/website/docs/d/data_lake_pipeline.html.markdown +++ b/website/docs/d/data_lake_pipeline.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lake" -sidebar_current: "docs-mongodbatlas-resource-data-lake" -description: |- - Describe a Data Lake Pipeline. ---- - # Data Source: mongodbatlas_data_lake_pipeline `mongodbatlas_data_lake_pipeline` describes a Data Lake Pipeline. diff --git a/website/docs/d/data_lake_pipeline_run.html.markdown b/website/docs/d/data_lake_pipeline_run.html.markdown index 7076ebfb40..29bf7389c4 100644 --- a/website/docs/d/data_lake_pipeline_run.html.markdown +++ b/website/docs/d/data_lake_pipeline_run.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lake_pipeline_run" -sidebar_current: "docs-mongodbatlas-datasource-data-lake-pipeline-run" -description: |- - Describes a Data Lake Pipeline Run. ---- - # Data Source: mongodbatlas_data_lake_pipeline_run -`mongodbatlas_data_lake_pipeline_run` describe a Data Lake Pipeline Run. +`mongodbatlas_data_lake_pipeline_run` describes a Data Lake Pipeline Run. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/d/data_lake_pipeline_runs.html.markdown b/website/docs/d/data_lake_pipeline_runs.html.markdown index e6bef8d43f..4e44f5459c 100644 --- a/website/docs/d/data_lake_pipeline_runs.html.markdown +++ b/website/docs/d/data_lake_pipeline_runs.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lake_pipeline_runs" -sidebar_current: "docs-mongodbatlas-datasource-data-lake-pipeline-runs" -description: |- - Describes Data Lake Pipeline Runs. 
---- - # Data Source: mongodbatlas_data_lake_pipeline_runs -`mongodbatlas_data_lake_pipeline_run` describe Data Lake Pipeline Runs. +`mongodbatlas_data_lake_pipeline_runs` describes Data Lake Pipeline Runs. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/d/data_lake_pipelines.html.markdown b/website/docs/d/data_lake_pipelines.html.markdown index f4545c3856..4d8c442749 100644 --- a/website/docs/d/data_lake_pipelines.html.markdown +++ b/website/docs/d/data_lake_pipelines.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lake" -sidebar_current: "docs-mongodbatlas-resource-data-lake" -description: |- - Describe Data Lake Pipelines. ---- - # Data Source: mongodbatlas_data_lake_pipelines `mongodbatlas_data_lake_pipelines` describes Data Lake Pipelines. diff --git a/website/docs/d/database_user.html.markdown b/website/docs/d/database_user.html.markdown index 1ab773bd8c..45645b4e56 100644 --- a/website/docs/d/database_user.html.markdown +++ b/website/docs/d/database_user.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: database_user" -sidebar_current: "docs-mongodbatlas-datasource-database-user" -description: |- - Describes a Database User. ---- - # Data Source: mongodbatlas_database_user -`mongodbatlas_database_user` describe a Database User. This represents a database user which will be applied to all clusters within the project. +`mongodbatlas_database_user` describes a Database User. This represents a database user which will be applied to all clusters within the project. Each user has a set of roles that provide access to the project’s databases. User's roles apply to all the clusters in the project: if two clusters have a `products` database and a user has a role granting `read` access on the products database, the user has that access on both clusters. 
diff --git a/website/docs/d/database_users.html.markdown b/website/docs/d/database_users.html.markdown index 4332fb5907..3750bb9c86 100644 --- a/website/docs/d/database_users.html.markdown +++ b/website/docs/d/database_users.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: database_users" -sidebar_current: "docs-mongodbatlas-datasource-database-users" -description: |- - Describes a Database Users. ---- - # Data Source: mongodbatlas_database_users -`mongodbatlas_database_users` describe all Database Users. This represents a database user which will be applied to all clusters within the project. +`mongodbatlas_database_users` describes all Database Users. This represents a database user which will be applied to all clusters within the project. Each user has a set of roles that provide access to the project’s databases. User's roles apply to all the clusters in the project: if two clusters have a `products` database and a user has a role granting `read` access on the products database, the user has that access on both clusters. diff --git a/website/docs/d/event_trigger.html.markdown b/website/docs/d/event_trigger.html.markdown index eb533494cc..dccd36c76f 100644 --- a/website/docs/d/event_trigger.html.markdown +++ b/website/docs/d/event_trigger.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: event_trigger" -sidebar_current: "docs-mongodbatlas-datasource-event-trigger" -description: |- - Describes an Event Trigger. ---- - # Data Source: mongodbatlas_event_trigger -`mongodbatlas_event_trigger` describe an Event Trigger. +`mongodbatlas_event_trigger` describes an Event Trigger. 
## Example Usage diff --git a/website/docs/d/event_triggers.html.markdown b/website/docs/d/event_triggers.html.markdown index 4a6c06dad1..3f18acedf0 100644 --- a/website/docs/d/event_triggers.html.markdown +++ b/website/docs/d/event_triggers.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: event_triggers" -sidebar_current: "docs-mongodbatlas-datasource-event-triggers" -description: |- - Describes an Event Triggers. ---- - # Data Source: mongodbatlas_event_triggers -`mongodbatlas_event_triggers` describe all Event Triggers. +`mongodbatlas_event_triggers` describes all Event Triggers. ## Example Usage diff --git a/website/docs/d/federated_database_instance.html.markdown b/website/docs/d/federated_database_instance.html.markdown index 3e50182085..99cd27f463 100644 --- a/website/docs/d/federated_database_instance.html.markdown +++ b/website/docs/d/federated_database_instance.html.markdown @@ -1,12 +1,4 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: federated_database_instance" -sidebar_current: "docs-mongodbatlas-resource-federated-database-instance" -description: |- - Provides a Federated Database Instance resource. ---- - -# # Data Source: mongodbatlas_federated_database_instance +# Data Source: mongodbatlas_federated_database_instance `mongodbatlas_federated_database_instance` provides a Federated Database Instance data source. diff --git a/website/docs/d/federated_database_instances.html.markdown b/website/docs/d/federated_database_instances.html.markdown index 19674ec4fa..841709b844 100644 --- a/website/docs/d/federated_database_instances.html.markdown +++ b/website/docs/d/federated_database_instances.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: federated_database_instance" -sidebar_current: "docs-mongodbatlas-resource-federated-database-instance" -description: |- - Provides a Federated Database Instance resource. 
---- - # Data Source: mongodbatlas_federated_database_instances `mongodbatlas_federated_database_instancess` provides a Federated Database Instance data source. diff --git a/website/docs/d/federated_query_limit.html.markdown b/website/docs/d/federated_query_limit.html.markdown index b7a2113a99..5ce6911f78 100644 --- a/website/docs/d/federated_query_limit.html.markdown +++ b/website/docs/d/federated_query_limit.html.markdown @@ -1,12 +1,4 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: federated_database_query_limit" -sidebar_current: "docs-mongodbatlas-resource-federated-query-limit" -description: |- - Provides a Federated Database Instance Query Limit. ---- - -## Data Source: mongodbatlas_federated_query_limit +# Data Source: mongodbatlas_federated_query_limit `mongodbatlas_federated_query_limit` provides a Federated Database Instance Query Limit data source. To learn more about Atlas Data Federation see https://www.mongodb.com/docs/atlas/data-federation/overview/. diff --git a/website/docs/d/federated_query_limits.html.markdown b/website/docs/d/federated_query_limits.html.markdown index 0a9b890a64..27c2682cb8 100644 --- a/website/docs/d/federated_query_limits.html.markdown +++ b/website/docs/d/federated_query_limits.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: federated_database_query_limit" -sidebar_current: "docs-mongodbatlas-resource-federated-query-limits" -description: |- - Provides a Federated Database Instance Query Limit. ---- - # Data Source: mongodbatlas_federated_query_limits `mongodbatlas_federated_query_limits` provides a Federated Database Instance Query Limits data source. To learn more about Atlas Data Federation see https://www.mongodb.com/docs/atlas/data-federation/overview/. 
diff --git a/website/docs/d/federated_settings.html.markdown b/website/docs/d/federated_settings.html.markdown index e3093ac29e..e99f339e45 100644 --- a/website/docs/d/federated_settings.html.markdown +++ b/website/docs/d/federated_settings.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings" -description: |- - Provides a federated settings data source. ---- - # Data Source: mongodbatlas_federated_settings `mongodbatlas_federated_settings` provides a federated settings data source. Atlas Cloud federated settings provides federated settings outputs. diff --git a/website/docs/d/federated_settings_identity_provider.html.markdown b/website/docs/d/federated_settings_identity_provider.html.markdown index d268acaacd..43eb965768 100644 --- a/website/docs/d/federated_settings_identity_provider.html.markdown +++ b/website/docs/d/federated_settings_identity_provider.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_identity_provider" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings-identity-provider" -description: |- - Provides a federated settings Organization identity provider data source. ---- - # Data Source: mongodbatlas_federated_settings_identity_provider `mongodbatlas_federated_settings_identity_provider` provides a federated settings identity provider data source. Atlas federated settings identity provider provides federated settings outputs for the configured identity provider. 
diff --git a/website/docs/d/federated_settings_identity_providers.html.markdown b/website/docs/d/federated_settings_identity_providers.html.markdown index b3bfe78b8d..0e8fad7380 100644 --- a/website/docs/d/federated_settings_identity_providers.html.markdown +++ b/website/docs/d/federated_settings_identity_providers.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_identity_providers" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings-identity-providers" -description: |- - Provides a federated settings Organization Identity Provider datasource. ---- - # Data Source: mongodbatlas_federated_settings_identity_providers `mongodbatlas_federated_settings_identity_providers` provides an Federated Settings Identity Providers datasource. Atlas Cloud Federated Settings Identity Providers provides federated settings outputs for the configured Identity Providers. diff --git a/website/docs/d/federated_settings_org_config.html.markdown b/website/docs/d/federated_settings_org_config.html.markdown index f2ea765eab..c1faba4446 100644 --- a/website/docs/d/federated_settings_org_config.html.markdown +++ b/website/docs/d/federated_settings_org_config.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_org_config" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings-org-config" -description: |- - Provides a federated settings Organization Configuration. ---- - # Data Source: mongodbatlas_federated_settings_org_config `mongodbatlas_federated_settings_org_config` provides an Federated Settings Identity Providers datasource. Atlas Cloud Federated Settings Organizational configuration provides federated settings outputs for the configured Organizational configuration. 
diff --git a/website/docs/d/federated_settings_org_configs.html.markdown b/website/docs/d/federated_settings_org_configs.html.markdown index cbaa72ee63..8c208fac58 100644 --- a/website/docs/d/federated_settings_org_configs.html.markdown +++ b/website/docs/d/federated_settings_org_configs.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_org_configs" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings-org-configs" -description: |- - Provides a federated settings Organization Configurations. ---- - # Data Source: mongodbatlas_federated_settings_org_configs `mongodbatlas_federated_settings_org_configs` provides an Federated Settings Identity Providers datasource. Atlas Cloud Federated Settings Identity Providers provides federated settings outputs for the configured Identity Providers. diff --git a/website/docs/d/federated_settings_org_role_mapping.html.markdown b/website/docs/d/federated_settings_org_role_mapping.html.markdown index 140866d39d..3bb8d49be4 100644 --- a/website/docs/d/federated_settings_org_role_mapping.html.markdown +++ b/website/docs/d/federated_settings_org_role_mapping.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_role_mapping" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings-role-mapping" -description: |- - Provides a federated settings Role Mapping datasource. ---- - # Data Source: mongodbatlas_federated_settings_org_role_mapping `mongodbatlas_federated_settings_org_role_mapping` provides an Federated Settings Org Role Mapping datasource. Atlas Cloud Federated Settings Org Role Mapping provides federated settings outputs for the configured Org Role Mapping. 
diff --git a/website/docs/d/federated_settings_org_role_mappings.html.markdown b/website/docs/d/federated_settings_org_role_mappings.html.markdown index 37626c58c3..79f40940c2 100644 --- a/website/docs/d/federated_settings_org_role_mappings.html.markdown +++ b/website/docs/d/federated_settings_org_role_mappings.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_role_mappings" -sidebar_current: "docs-mongodbatlas-datasource-federated-settings-role-mappings" -description: |- - Provides a federated settings Role Mapping datasource. ---- - # Data Source: mongodbatlas_federated_settings_org_role_mappings `mongodbatlas_federated_settings_org_role_mappings` provides an Federated Settings Org Role Mapping datasource. Atlas Cloud Federated Settings Org Role Mapping provides federated settings outputs for the configured Org Role Mapping. diff --git a/website/docs/d/global_cluster_config.html.markdown b/website/docs/d/global_cluster_config.html.markdown index 93d541347a..dd74ba44da 100644 --- a/website/docs/d/global_cluster_config.html.markdown +++ b/website/docs/d/global_cluster_config.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: global_cluster_config" -sidebar_current: "docs-mongodbatlas-datasource-global-cluster-config" -description: |- - Describes the Global Cluster Configuration. ---- - # Data Source: mongodbatlas_global_cluster_config `mongodbatlas_global_cluster_config` describes all managed namespaces and custom zone mappings associated with the specified Global Cluster. 
diff --git a/website/docs/d/ldap_configuration.html.markdown b/website/docs/d/ldap_configuration.html.markdown index 0463ea0778..3187cb6610 100644 --- a/website/docs/d/ldap_configuration.html.markdown +++ b/website/docs/d/ldap_configuration.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: ldap configuration" -sidebar_current: "docs-mongodbatlas-datasource-ldap-configuration" -description: |- - Describes a LDAP Configuration. ---- - # Data Source: mongodbatlas_ldap_configuration `mongodbatlas_ldap_configuration` describes a LDAP Configuration. diff --git a/website/docs/d/ldap_verify.html.markdown b/website/docs/d/ldap_verify.html.markdown index 87f3442dfa..68a4e66fbe 100644 --- a/website/docs/d/ldap_verify.html.markdown +++ b/website/docs/d/ldap_verify.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: ldap verify" -sidebar_current: "docs-mongodbatlas-datasource-ldap-verify" -description: |- - Describes a LDAP Verify. ---- - # Data Source: mongodbatlas_ldap_verify `mongodbatlas_ldap_verify` describes a LDAP Verify. diff --git a/website/docs/d/maintenance_window.html.markdown b/website/docs/d/maintenance_window.html.markdown index 4043e45349..69cf2e639a 100644 --- a/website/docs/d/maintenance_window.html.markdown +++ b/website/docs/d/maintenance_window.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: maintenance_window" -sidebar_current: "docs-mongodbatlas-datasource-maintenance_window" -description: |- - Provides a Maintenance Window Datasource. ---- - # Data Source: mongodbatlas_maintenance_window `mongodbatlas_maintenance_window` provides a Maintenance Window entry datasource. Gets information regarding the configured maintenance window for a MongoDB Atlas project. 
diff --git a/website/docs/d/network_container.html.markdown b/website/docs/d/network_container.html.markdown index a5003966b3..eabff26980 100644 --- a/website/docs/d/network_container.html.markdown +++ b/website/docs/d/network_container.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: network_container" -sidebar_current: "docs-mongodbatlas-datasource-network-container" -description: |- - Describes a Cluster resource. ---- - # Data Source: mongodbatlas_network_container `mongodbatlas_network_container` describes a Network Peering Container. The resource requires your Project ID and container ID. diff --git a/website/docs/d/network_containers.html.markdown b/website/docs/d/network_containers.html.markdown index 19542a0e72..8418e6bd2d 100644 --- a/website/docs/d/network_containers.html.markdown +++ b/website/docs/d/network_containers.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: network_containers" -sidebar_current: "docs-mongodbatlas-datasource-network-containers" -description: |- - Describes all Network Peering Containers in the project. ---- - # Data Source: mongodbatlas_network_containers `mongodbatlas_network_containers` describes all Network Peering Containers. The data source requires your Project ID. diff --git a/website/docs/d/network_peering.html.markdown b/website/docs/d/network_peering.html.markdown index e83a9d9383..40ab373db6 100644 --- a/website/docs/d/network_peering.html.markdown +++ b/website/docs/d/network_peering.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: network_peering" -sidebar_current: "docs-mongodbatlas-datasource-network-peering" -description: |- - Describes a Network Peering. ---- - # Data Source: mongodbatlas_network_peering `mongodbatlas_network_peering` describes a Network Peering Connection. 
diff --git a/website/docs/d/network_peerings.html.markdown b/website/docs/d/network_peerings.html.markdown index bd68e63013..a1206c717d 100644 --- a/website/docs/d/network_peerings.html.markdown +++ b/website/docs/d/network_peerings.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: network_peerings" -sidebar_current: "docs-mongodbatlas-datasource-network-peerings" -description: |- - Describes all Network Peering Connections. ---- - # Data Source: mongodbatlas_network_peerings `mongodbatlas_network_peerings` describes all Network Peering Connections. diff --git a/website/docs/d/online_archive.html.markdown b/website/docs/d/online_archive.html.markdown index 470f1cd6c6..377f41b985 100644 --- a/website/docs/d/online_archive.html.markdown +++ b/website/docs/d/online_archive.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: online_archive" -sidebar_current: "docs-mongodbatlas-datasource-online-archive" -description: |- - Describes an Online Archive ---- - # Data Source: mongodbatlas_online_archive `mongodbatlas_online_archive` describes an Online Archive diff --git a/website/docs/d/online_archives.html.markdown b/website/docs/d/online_archives.html.markdown index 7b02dd0e8f..67e77ce0f7 100644 --- a/website/docs/d/online_archives.html.markdown +++ b/website/docs/d/online_archives.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: online_archives" -sidebar_current: "docs-mongodbatlas-datasource-online-archives" -description: |- - Describes the list of all the online archives for a cluster ---- - # Data Source: mongodbatlas_online_archive `mongodbatlas_online_archive` Describes the list of all the online archives for a cluster diff --git a/website/docs/d/org_invitation.html.markdown b/website/docs/d/org_invitation.html.markdown index 9415e96c94..421e4e26c2 100644 --- a/website/docs/d/org_invitation.html.markdown +++ 
b/website/docs/d/org_invitation.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: org_invitation" -sidebar_current: "docs-mongodbatlas-datasource-organization-invitation" -description: |- - Provides an Atlas Organization Invitation. ---- - # Data Source: mongodbatlas_org_invitation `mongodbatlas_org_invitation` describes an invitation for a user to join an Atlas organization. diff --git a/website/docs/d/organization.html.markdown b/website/docs/d/organization.html.markdown index 207356c6f8..c20693e935 100644 --- a/website/docs/d/organization.html.markdown +++ b/website/docs/d/organization.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: organization" -sidebar_current: "docs-mongodbatlas-datasource-organization" -description: |- - Describes an Organization. ---- - # Data Source: mongodbatlas_organization -`mongodbatlas_organization` describe all MongoDB Atlas Organizations. This represents organizations that have been created. +`mongodbatlas_organization` describes all MongoDB Atlas Organizations. This represents organizations that have been created. ## Example Usage diff --git a/website/docs/d/organizations.html.markdown b/website/docs/d/organizations.html.markdown index 4e2e7c40c6..d33d75d6b2 100644 --- a/website/docs/d/organizations.html.markdown +++ b/website/docs/d/organizations.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: organizations" -sidebar_current: "docs-mongodbatlas-organizations" -description: |- - Describes Organizations. ---- - # Data Source: mongodbatlas_organizations -`mongodbatlas_organizations` describe all MongoDB Atlas Organizations. This represents organizations that have been created. +`mongodbatlas_organizations` describes all MongoDB Atlas Organizations. This represents organizations that have been created. 
## Example Usage diff --git a/website/docs/d/private_endpoint_regional_mode.html.markdown b/website/docs/d/private_endpoint_regional_mode.html.markdown index 140898db44..8972932587 100644 --- a/website/docs/d/private_endpoint_regional_mode.html.markdown +++ b/website/docs/d/private_endpoint_regional_mode.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: private_endpoint_regional_mode" -sidebar_current: "docs-mongodbatlas-datasource-private-endpoint-regional-mode" -description: |- - Describes a Private Endpoint Regional Mode ---- - # Data Source: private_endpoint_regional_mode -`private_endpoint_regional_mode` describe a Private Endpoint Regional Mode. This represents a Private Endpoint Regional Mode Connection that wants to retrieve settings of an Atlas project. +`private_endpoint_regional_mode` describes a Private Endpoint Regional Mode. This represents a Private Endpoint Regional Mode Connection that wants to retrieve settings of an Atlas project. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/d/privatelink_endpoint.html.markdown b/website/docs/d/privatelink_endpoint.html.markdown index f90f34779b..7d7923804c 100644 --- a/website/docs/d/privatelink_endpoint.html.markdown +++ b/website/docs/d/privatelink_endpoint.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: private_endpoint" -sidebar_current: "docs-mongodbatlas-datasource-private-endpoint" -description: |- - Describes a Private Endpoint. ---- - # Data Source: mongodbatlas_privatelink_endpoint -`mongodbatlas_privatelink_endpoint` describe a Private Endpoint. This represents a Private Endpoint Connection to retrieve details regarding a private endpoint by id in an Atlas project +`mongodbatlas_privatelink_endpoint` describes a Private Endpoint. 
This represents a Private Endpoint Connection to retrieve details regarding a private endpoint by id in an Atlas project -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/d/privatelink_endpoint_service.html.markdown b/website/docs/d/privatelink_endpoint_service.html.markdown index 39622a0419..13a40f7525 100644 --- a/website/docs/d/privatelink_endpoint_service.html.markdown +++ b/website/docs/d/privatelink_endpoint_service.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: private_endpoint_link" -sidebar_current: "docs-mongodbatlas-datasource-private-endpoint-link" -description: |- - Describes a Private Endpoint Link. ---- - # Data Source: mongodbatlas_privatelink_endpoint_service -`mongodbatlas_privatelink_endpoint_service` describe a Private Endpoint Link. This represents a Private Endpoint Link Connection that wants to retrieve details in an Atlas project. +`mongodbatlas_privatelink_endpoint_service` describes a Private Endpoint Link. This represents a Private Endpoint Link Connection that wants to retrieve details in an Atlas project. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. 
diff --git a/website/docs/d/privatelink_endpoint_service_data_federation_online_archive.html.markdown b/website/docs/d/privatelink_endpoint_service_data_federation_online_archive.html.markdown index e4ba17ac66..953d7b4087 100644 --- a/website/docs/d/privatelink_endpoint_service_data_federation_online_archive.html.markdown +++ b/website/docs/d/privatelink_endpoint_service_data_federation_online_archive.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_privatelink_endpoint_service_data_federation_online_archive" -sidebar_current: "docs-mongodbatlas-data-source-privatelink-endpoint-service-data-federation-online-archive" -description: |- - Provides a data source for a Private Endpoint Service Data Federation Online Archive. ---- - # Data Source: mongodbatlas_privatelink_endpoint_service_data_federation_online_archive `mongodbatlas_privatelink_endpoint_service_data_federation_online_archive` describes a Private Endpoint Service resource for Data Federation and Online Archive. diff --git a/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.html.markdown b/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.html.markdown index 94ad013c98..75aa36e8fd 100644 --- a/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.html.markdown +++ b/website/docs/d/privatelink_endpoint_service_data_federation_online_archives.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_privatelink_endpoint_service_data_federation_online_archives" -sidebar_current: "docs-mongodbatlas-data-source-privatelink-endpoint-service-data-federation-online-archives" -description: |- - Provides a data source for a Private Endpoints Service Data Federation Online Archive. 
---- - # Data Source: mongodbatlas_privatelink_endpoint_service_data_federation_online_archives `mongodbatlas_privatelink_endpoint_service_data_federation_online_archives` describes Private Endpoint Service resources for Data Federation and Online Archive. diff --git a/website/docs/d/privatelink_endpoint_service_serverless.html.markdown b/website/docs/d/privatelink_endpoint_service_serverless.html.markdown index a249f9e0e3..224a17c1e0 100644 --- a/website/docs/d/privatelink_endpoint_service_serverless.html.markdown +++ b/website/docs/d/privatelink_endpoint_service_serverless.html.markdown @@ -1,15 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: privatelink_endpoint_service_serverless" -sidebar_current: "docs-mongodbatlas-datasource-privatelink-endpoint-service-serverless" -description: |- -Describes a Serverless PrivateLink Endpoint Service ---- - - # Data Source: privatelink_endpoint_service_serverless -`privatelink_endpoint_service_serverless` Provides a Serverless PrivateLink Endpoint Service resource. +`privatelink_endpoint_service_serverless` provides a Serverless PrivateLink Endpoint Service resource. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/d/privatelink_endpoints_service_adl.html.markdown b/website/docs/d/privatelink_endpoints_service_adl.html.markdown index c435fd0976..fcddf7e800 100644 --- a/website/docs/d/privatelink_endpoints_service_adl.html.markdown +++ b/website/docs/d/privatelink_endpoints_service_adl.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: privatelink_endpoints_service_adl" -sidebar_current: "docs-mongodbatlas-datasource-privatelink-endpoints-service-adl" -description: |- -Describes the list of all Atlas Data Lake and Online Archive PrivateLink endpoints. 
---- - # Data Source: privatelink_endpoints_service_adl -`privatelink_endpoints_service_adl` Describes the list of all Atlas Data Lake (ADL) and Online Archive PrivateLink endpoints resource. +`privatelink_endpoints_service_adl` describes the list of all Atlas Data Lake (ADL) and Online Archive PrivateLink endpoints resource. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/d/privatelink_endpoints_service_serverless.html.markdown b/website/docs/d/privatelink_endpoints_service_serverless.html.markdown index 3ddd95d2c3..6740e49d52 100644 --- a/website/docs/d/privatelink_endpoints_service_serverless.html.markdown +++ b/website/docs/d/privatelink_endpoints_service_serverless.html.markdown @@ -1,15 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: privatelink_endpoints_service_serverless" -sidebar_current: "docs-mongodbatlas-datasource-privatelink-endpoints-service-serverless" -description: |- -Describes the list of all Serverless PrivateLink Endpoint Service ---- - - # Data Source: privatelink_endpoints_service_serverless -`privatelink_endpoints_service_serverless` Describes the list of all Serverless PrivateLink Endpoint Service resource. +`privatelink_endpoints_service_serverless` describes the list of all Serverless PrivateLink Endpoint Service resource. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/d/project.html.markdown b/website/docs/d/project.html.markdown index 0f53077b93..49e9c49b36 100644 --- a/website/docs/d/project.html.markdown +++ b/website/docs/d/project.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project" -sidebar_current: "docs-mongodbatlas-datasource-project" -description: |- - Describes a Project. ---- - # Data Source: mongodbatlas_project `mongodbatlas_project` describes a MongoDB Atlas Project. 
This represents a project that has been created. diff --git a/website/docs/d/project_api_key.html.markdown b/website/docs/d/project_api_key.html.markdown index 37b47e06b9..a97c1058b0 100644 --- a/website/docs/d/project_api_key.html.markdown +++ b/website/docs/d/project_api_key.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project_api_key" -sidebar_current: "docs-mongodbatlas-datasource-project-api-key" -description: |- - Describes a Project API Key. ---- - # Data Source: mongodbatlas_project_api_key `mongodbatlas_project_api_key` describes a MongoDB Atlas Project API Key. This represents a Project API Key that has been created. diff --git a/website/docs/d/project_api_keys.html.markdown b/website/docs/d/project_api_keys.html.markdown index c21fc4513a..22f3be6ac8 100644 --- a/website/docs/d/project_api_keys.html.markdown +++ b/website/docs/d/project_api_keys.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: api_keys" -sidebar_current: "docs-mongodbatlas-api-keys" -description: |- - Describes a API Keys. ---- - # Data Source: mongodbatlas_api_keys -`mongodbatlas_api_keys` describe all API Keys. This represents API Keys that have been created. +`mongodbatlas_api_keys` describes all API Keys. This represents API Keys that have been created. ~> **IMPORTANT WARNING:** Managing Atlas Programmatic API Keys (PAKs) with Terraform will expose sensitive organizational secrets in Terraform's state. We suggest following [Terraform's best practices](https://developer.hashicorp.com/terraform/language/state/sensitive-data). You may also want to consider managing your PAKs via a more secure method, such as the [HashiCorp Vault MongoDB Atlas Secrets Engine](https://developer.hashicorp.com/vault/docs/secrets/mongodbatlas). 
diff --git a/website/docs/d/project_invitation.html.markdown b/website/docs/d/project_invitation.html.markdown index 975d276aad..856498fa11 100644 --- a/website/docs/d/project_invitation.html.markdown +++ b/website/docs/d/project_invitation.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project_invitation" -sidebar_current: "docs-mongodbatlas-datasource-project-invitation" -description: |- - Provides an Atlas project invitation. ---- - # Data Source: mongodbatlas_project_invitation `mongodbatlas_project_invitation` describes an invitation to a user to join an Atlas project. diff --git a/website/docs/d/project_ip_access_list.html.markdown b/website/docs/d/project_ip_access_list.html.markdown index 4be8b91d57..132d969968 100644 --- a/website/docs/d/project_ip_access_list.html.markdown +++ b/website/docs/d/project_ip_access_list.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project_ip_access_list" -sidebar_current: "docs-mongodbatlas-datasource-project-ip-access-list" -description: |- - Provides an IP Access List resource. ---- - # Data Source: mongodbatlas_project_ip_access_list `mongodbatlas_project_ip_access_list` describes an IP Access List entry resource. The access list grants access from IPs, CIDRs or AWS Security Groups (if VPC Peering is enabled) to clusters within the Project. diff --git a/website/docs/d/projects.html.markdown b/website/docs/d/projects.html.markdown index 1aeb5df4b9..e1dea0a3a3 100644 --- a/website/docs/d/projects.html.markdown +++ b/website/docs/d/projects.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: projects" -sidebar_current: "docs-mongodbatlas-projects" -description: |- - Describes a Projects. ---- - # Data Source: mongodbatlas_projects -`mongodbatlas_projects` describe all Projects. This represents projects that have been created. +`mongodbatlas_projects` describes all Projects. 
This represents projects that have been created. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/d/push_based_log_export.html.markdown b/website/docs/d/push_based_log_export.html.markdown index e03254a091..3775db298a 100644 --- a/website/docs/d/push_based_log_export.html.markdown +++ b/website/docs/d/push_based_log_export.html.markdown @@ -1,14 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_push_based_log_export" -sidebar_current: "docs-mongodbatlas-datasource-push-based-log-export" -description: |- - "Provides a data source for push-based log export feature." ---- - # Data Source: mongodbatlas_push_based_log_export - `mongodbatlas_push_based_log_export` describes the configured project level settings for the push-based log export feature. ## Example Usages diff --git a/website/docs/d/roles_org_id.html.markdown b/website/docs/d/roles_org_id.html.markdown index 899e582696..be4e87b7de 100644 --- a/website/docs/d/roles_org_id.html.markdown +++ b/website/docs/d/roles_org_id.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: roles_org_id" -sidebar_current: "docs-mongodbatlas-datasource-roles-org-id" -description: |- - Describes a Roles Org ID. ---- - # Data Source: mongodbatlas_roles_org_id `mongodbatlas_roles_org_id` describes a MongoDB Atlas Roles Org ID. This represents a Roles Org ID. diff --git a/website/docs/d/search_deployment.html.markdown b/website/docs/d/search_deployment.html.markdown index 568211492b..92e24e3b98 100644 --- a/website/docs/d/search_deployment.html.markdown +++ b/website/docs/d/search_deployment.html.markdown @@ -1,14 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_search_deployment" -sidebar_current: "docs-mongodbatlas-datasource-search-deployment" -description: |- - "Provides a Search Deployment data source." 
---- - # Data Source: mongodbatlas_search_deployment - `mongodbatlas_search_deployment` describes a search node deployment. ## Example Usages diff --git a/website/docs/d/search_index.html.markdown b/website/docs/d/search_index.html.markdown index 2eae237422..cd3bf0255f 100644 --- a/website/docs/d/search_index.html.markdown +++ b/website/docs/d/search_index.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: search index" -sidebar_current: "docs-mongodbatlas-datasource-search-index" -description: |- -Describes a Search Index. ---- - # Data Source: mongodbatlas_search_index -`mongodbatlas_search_index` describe a single search indexes. This represents a single search index that have been created. +`mongodbatlas_search_index` describes a single search index. This represents a single search index that has been created. > **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/d/search_indexes.html.markdown b/website/docs/d/search_indexes.html.markdown index 84b346244b..abc56a6e0d 100644 --- a/website/docs/d/search_indexes.html.markdown +++ b/website/docs/d/search_indexes.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: search indexes" -sidebar_current: "docs-mongodbatlas-datasource-search-indexes" -description: |- -Describes a Search Indexes. ---- - # Data Source: mongodbatlas_search_indexes -`mongodbatlas_search_indexes` describe all search indexes. This represents search indexes that have been created. +`mongodbatlas_search_indexes` describes all search indexes. This represents search indexes that have been created. > **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. 
diff --git a/website/docs/d/serverless_instance.html.markdown b/website/docs/d/serverless_instance.html.markdown index de3257683d..48a0be84d7 100644 --- a/website/docs/d/serverless_instance.html.markdown +++ b/website/docs/d/serverless_instance.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: serverless instance" -sidebar_current: "docs-mongodbatlas-datasource-serverless-instance" -description: |- -Provides a Serverless Instance. ---- - # Data Source: mongodbatlas_serverless_instance -`mongodbatlas_serverless_instance` describe a single serverless instance. This represents a single serverless instance that have been created. +`mongodbatlas_serverless_instance` describes a single serverless instance. This represents a single serverless instance that has been created. > **NOTE:** Serverless instances do not support some Atlas features at this time. For a full list of unsupported features, see [Serverless Instance Limitations](https://docs.atlas.mongodb.com/reference/serverless-instance-limitations/). diff --git a/website/docs/d/serverless_instances.html.markdown b/website/docs/d/serverless_instances.html.markdown index 403a1a94f5..5dfb38816f 100644 --- a/website/docs/d/serverless_instances.html.markdown +++ b/website/docs/d/serverless_instances.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: serverless instances" -sidebar_current: "docs-mongodbatlas-datasource-serverless-instances" -description: |- -Describes a Serverless Instances. ---- - # Data Source: mongodbatlas_serverless_instances -`mongodbatlas_serverless_instances` describe all serverless instances. This represents serverless instances that have been created for the specified group id. +`mongodbatlas_serverless_instances` describes all serverless instances. This represents serverless instances that have been created for the specified group id. > **NOTE:** Serverless instances do not support some Atlas features at this time. 
For a full list of unsupported features, see [Serverless Instance Limitations](https://docs.atlas.mongodb.com/reference/serverless-instance-limitations/). diff --git a/website/docs/d/stream_connection.html.markdown b/website/docs/d/stream_connection.html.markdown index 33221592d3..242837f186 100644 --- a/website/docs/d/stream_connection.html.markdown +++ b/website/docs/d/stream_connection.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: stream connection" -sidebar_current: "docs-mongodbatlas-datasource-stream-connection" -description: |- - Describes an Atlas Stream Processing connection. ---- - # Data Source: mongodbatlas_stream_connection `mongodbatlas_stream_connection` describes a stream connection. diff --git a/website/docs/d/stream_connections.html.markdown b/website/docs/d/stream_connections.html.markdown index b80a31d912..6bdbfe2261 100644 --- a/website/docs/d/stream_connections.html.markdown +++ b/website/docs/d/stream_connections.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: stream connections" -sidebar_current: "docs-mongodbatlas-datasource-stream-connections" -description: |- - Describes all connections of the Atlas Stream Processing instance for the specified project. ---- - # Data Source: mongodbatlas_stream_connections `mongodbatlas_stream_connections` describes all connections of a stream instance for the specified project. diff --git a/website/docs/d/stream_instance.html.markdown b/website/docs/d/stream_instance.html.markdown index a848fc46f4..8da78e5110 100644 --- a/website/docs/d/stream_instance.html.markdown +++ b/website/docs/d/stream_instance.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: stream instance" -sidebar_current: "docs-mongodbatlas-datasource-stream-instance" -description: |- - Describes a Stream Instance. 
---- - # Data Source: mongodbatlas_stream_instance `mongodbatlas_stream_instance` describes a stream instance. diff --git a/website/docs/d/stream_instances.html.markdown b/website/docs/d/stream_instances.html.markdown index 0c9197aa8a..f02a878763 100644 --- a/website/docs/d/stream_instances.html.markdown +++ b/website/docs/d/stream_instances.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: stream instances" -sidebar_current: "docs-mongodbatlas-datasource-stream-instances" -description: |- - Describes stream instances of a project. ---- - # Data Source: mongodbatlas_stream_instances `mongodbatlas_stream_instances` describes the stream instances defined in a project. diff --git a/website/docs/d/team.html.markdown b/website/docs/d/team.html.markdown index 96547da436..a15e880541 100644 --- a/website/docs/d/team.html.markdown +++ b/website/docs/d/team.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: team" -sidebar_current: "docs-mongodbatlas-datasource-team" -description: |- - Describes a Team. ---- - # Data Source: mongodbatlas_team `mongodbatlas_team` describes a Team. The resource requires your Organization ID, Project ID and Team ID. diff --git a/website/docs/d/teams.html.markdown b/website/docs/d/teams.html.markdown index 0aa4ede9c8..139c3ff5f0 100644 --- a/website/docs/d/teams.html.markdown +++ b/website/docs/d/teams.html.markdown @@ -1,11 +1,9 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: teams" -sidebar_current: "docs-mongodbatlas-datasource-teams" -description: |- - Describes a Team. +subcategory: "Deprecated" --- +**WARNING:** This datasource is deprecated, use `mongodbatlas_team` + # Data Source: mongodbatlas_teams This data source is deprecated. Please transition to using `mongodbatlas_team` which defines the same underlying implementation, aligning the name of the data source with the implementation which fetches a single team. 
diff --git a/website/docs/d/third_party_integration.markdown b/website/docs/d/third_party_integration.markdown index 0d4894f046..bd2c9c25aa 100644 --- a/website/docs/d/third_party_integration.markdown +++ b/website/docs/d/third_party_integration.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: third_party_integration" -sidebar_current: "docs-mongodbatlas-datasource-third-party-integration" -description: |- - Describes all Third-Party Integration Settings in the project. ---- - # Data Source: mongodbatlas_third_party_integration -`mongodbatlas_third_party_integration` describe a Third-Party Integration Settings for the given type. +`mongodbatlas_third_party_integration` describes a Third-Party Integration Settings for the given type. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/d/third_party_integrations.markdown b/website/docs/d/third_party_integrations.markdown index caccd881bc..c177cc3490 100644 --- a/website/docs/d/third_party_integrations.markdown +++ b/website/docs/d/third_party_integrations.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: third_party_integrations" -sidebar_current: "docs-mongodbatlas-datasource-third-party-integrations" -description: |- - Describes all Third-Party Integration Settings in the project. ---- - # Data Source: mongodbatlas_third_party_integrations -`mongodbatlas_third_party_integrations` describe all Third-Party Integration Settings. This represents two Third-Party services `PAGER_DUTY` and `DATADOG` +`mongodbatlas_third_party_integrations` describes all Third-Party Integration Settings. This represents two Third-Party services `PAGER_DUTY` and `DATADOG` applied across the project. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. 
diff --git a/website/docs/d/x509_authentication_database_user.html.markdown b/website/docs/d/x509_authentication_database_user.html.markdown index a08f1eb1a5..e3a9509289 100644 --- a/website/docs/d/x509_authentication_database_user.html.markdown +++ b/website/docs/d/x509_authentication_database_user.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: x509_authentication_database_user" -sidebar_current: "docs-mongodbatlas-datasource-x509-authentication-database-user" -description: |- - Describes a Custom DB Role. ---- - # Data Source: mongodbatlas_x509_authentication_database_user -`mongodbatlas_x509_authentication_database_user` describe a X509 Authentication Database User. This represents a X509 Authentication Database User. +`mongodbatlas_x509_authentication_database_user` describes an X509 Authentication Database User. This represents an X509 Authentication Database User. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index fcf4c541a7..7bef25fda4 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "Provider: MongoDB Atlas" -sidebar_current: "docs-mongodbatlas-index" -description: |- - The MongoDB Atlas provider is used to interact with the resources supported by MongoDB Atlas. The provider needs to be configured with the proper credentials before it can be used. ---- - # MongoDB Atlas Provider You can use the MongoDB Atlas provider to interact with the resources supported by [MongoDB Atlas](https://www.mongodb.com/cloud/atlas). 
diff --git a/website/docs/r/access_list_api_key.html.markdown b/website/docs/r/access_list_api_key.html.markdown index e096b1de7e..9a80ac4eb6 100644 --- a/website/docs/r/access_list_api_key.html.markdown +++ b/website/docs/r/access_list_api_key.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: access_list_api_key" -sidebar_current: "docs-mongodbatlas-resource-access_list-api-key" -description: |- - Creates the access list entries for the specified Atlas Organization API Key. ---- - # Resource: mongodbatlas_access_list_api_key `mongodbatlas_access_list_api_key` provides an IP Access List entry resource. The access list grants access from IPs, CIDRs or AWS Security Groups (if VPC Peering is enabled) to clusters within the Project. diff --git a/website/docs/r/advanced_cluster.html.markdown b/website/docs/r/advanced_cluster.html.markdown index 8da32009f5..ef93549c0f 100644 --- a/website/docs/r/advanced_cluster.html.markdown +++ b/website/docs/r/advanced_cluster.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: advanced_cluster" -sidebar_current: "docs-mongodbatlas-resource-advanced-cluster" -description: |- - Provides an Advanced Cluster resource. ---- - # Resource: mongodbatlas_advanced_cluster `mongodbatlas_advanced_cluster` provides an Advanced Cluster resource. The resource lets you create, edit and delete advanced clusters. The resource requires your Project ID. diff --git a/website/docs/r/alert_configuration.html.markdown b/website/docs/r/alert_configuration.html.markdown index 7dc7373cf8..fe3df3f8d6 100644 --- a/website/docs/r/alert_configuration.html.markdown +++ b/website/docs/r/alert_configuration.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: alert_configuration" -sidebar_current: "docs-mongodbatlas-resource-alert-configuration" -description: |- - Provides an Alert Configuration resource. 
---- - # Resource: mongodbatlas_alert_configuration `mongodbatlas_alert_configuration` provides an Alert Configuration resource to define the conditions that trigger an alert and the methods of notification within a MongoDB Atlas project. diff --git a/website/docs/r/api_key.html.markdown b/website/docs/r/api_key.html.markdown index 415953a1fd..13c5d7b555 100644 --- a/website/docs/r/api_key.html.markdown +++ b/website/docs/r/api_key.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: api_key" -sidebar_current: "docs-mongodbatlas-resource-api-key" -description: |- - Provides a API Key resource. ---- - # Resource: mongodbatlas_api_key `mongodbatlas_api_key` provides a Organization API key resource. This allows an Organizational API key to be created. diff --git a/website/docs/r/auditing.html.markdown b/website/docs/r/auditing.html.markdown index e0a1c816ee..444375770d 100644 --- a/website/docs/r/auditing.html.markdown +++ b/website/docs/r/auditing.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: auditing" -sidebar_current: "docs-mongodbatlas-resource-auditing" -description: |- - Provides a Auditing resource. ---- - # Resource: mongodbatlas_auditing `mongodbatlas_auditing` provides an Auditing resource. This allows auditing to be created. diff --git a/website/docs/r/backup_compliance_policy.html.markdown b/website/docs/r/backup_compliance_policy.html.markdown index 06c8f6b5e8..47454a054e 100644 --- a/website/docs/r/backup_compliance_policy.html.markdown +++ b/website/docs/r/backup_compliance_policy.html.markdown @@ -1,10 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: backup_compliance_policy" -sidebar_current: "docs-mongodbatlas-resource-backup-compliance-policy" -description: |- - Provides a Backup Compliance Policy resource. 
---- # Resource: mongodbatlas_backup_compliance_policy `mongodbatlas_backup_compliance_policy` provides a resource that enables you to set up a Backup Compliance Policy resource. [Backup Compliance Policy ](https://www.mongodb.com/docs/atlas/backup/cloud-backup/backup-compliance-policy) prevents any user, regardless of role, from modifying or deleting specific cluster settings, backups, and backup configurations. When enabled, the Backup Compliance Policy will be applied as the minimum policy for all clusters and backups in the project. It can only be disabled by contacting MongoDB support. This feature is only supported for cluster tiers M10+. diff --git a/website/docs/r/cloud_backup_schedule.html.markdown b/website/docs/r/cloud_backup_schedule.html.markdown index 65fad8bd4a..ab064c2968 100644 --- a/website/docs/r/cloud_backup_schedule.html.markdown +++ b/website/docs/r/cloud_backup_schedule.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_schedule" -sidebar_current: "docs-mongodbatlas-resource-cloud-backup-schedule" -description: |- - Provides a Cloud Backup Schedule resource. ---- - # Resource: mongodbatlas_cloud_backup_schedule `mongodbatlas_cloud_backup_schedule` provides a cloud backup schedule resource. The resource lets you create, read, update and delete a cloud backup schedule. diff --git a/website/docs/r/cloud_backup_snapshot.html.markdown b/website/docs/r/cloud_backup_snapshot.html.markdown index 7b49f04a3f..1563886195 100644 --- a/website/docs/r/cloud_backup_snapshot.html.markdown +++ b/website/docs/r/cloud_backup_snapshot.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot" -sidebar_current: "docs-mongodbatlas-resource-cloud_backup_snapshot" -description: |- - Provides a Cloud Backup Snapshot resource. 
---- - # Resource: mongodbatlas_cloud_backup_snapshot `mongodbatlas_cloud_backup_snapshot` provides a resource to take a cloud backup snapshot on demand. diff --git a/website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown b/website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown index e3f46f56fe..2ffef835aa 100644 --- a/website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown +++ b/website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown @@ -1,13 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_export_bucket" -sidebar_current: "docs-mongodbatlas-resource-cloud_backup_snapshot_export_bucket" -description: |- - Provides a Cloud Backup Snapshot Export Bucket resource. ---- - # Resource: mongodbatlas_cloud_backup_snapshot_export_bucket -`mongodbatlas_cloud_backup_snapshot_export_bucket` resource allows you to create an export snapshot bucket for the specified project. + +`mongodbatlas_cloud_backup_snapshot_export_bucket` allows you to create an export snapshot bucket for the specified project. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/r/cloud_backup_snapshot_export_job.html.markdown b/website/docs/r/cloud_backup_snapshot_export_job.html.markdown index cae182da9a..2fdc724104 100644 --- a/website/docs/r/cloud_backup_snapshot_export_job.html.markdown +++ b/website/docs/r/cloud_backup_snapshot_export_job.html.markdown @@ -1,13 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_export_job" -sidebar_current: "docs-mongodbatlas-resource-cloud_backup_snapshot_export_job" -description: |- - Provides a Cloud Backup Snapshot Export Job resource. ---- - # Resource: mongodbatlas_cloud_backup_snapshot_export_job -`mongodbatlas_cloud_backup_snapshot_export_job` resource allows you to create a cloud backup snapshot export job for the specified project. 
+ +`mongodbatlas_cloud_backup_snapshot_export_job` allows you to create a cloud backup snapshot export job for the specified project. -> **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation. diff --git a/website/docs/r/cloud_backup_snapshot_restore_job.html.markdown b/website/docs/r/cloud_backup_snapshot_restore_job.html.markdown index 0a805146ed..2f4d5e0b6e 100644 --- a/website/docs/r/cloud_backup_snapshot_restore_job.html.markdown +++ b/website/docs/r/cloud_backup_snapshot_restore_job.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_backup_snapshot_restore_job" -sidebar_current: "docs-mongodbatlas-resource-cloud_backup_snapshot_restore_job" -description: |- - Provides a Cloud Backup Snapshot Restore Job resource. ---- - # Resource: mongodbatlas_cloud_backup_snapshot_restore_job `mongodbatlas_cloud_backup_snapshot_restore_job` provides a resource to create a new restore job from a cloud backup snapshot of a specified cluster. 
The restore job must define one of three delivery types: diff --git a/website/docs/r/cloud_provider_access.markdown b/website/docs/r/cloud_provider_access.markdown index 80eb8f700f..331f250bb8 100644 --- a/website/docs/r/cloud_provider_access.markdown +++ b/website/docs/r/cloud_provider_access.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_cloud_provider_access" -sidebar_current: "docs-mongodbatlas-resource-cloud-provider-access" -description: |- - Provides a Cloud Provider Access settings resource for registration, authorization, and deauthorization ---- - # Resource: Cloud Provider Access Configuration Paths The Terraform MongoDB Atlas Provider offers the following path to perform an authorization for a cloud provider role - diff --git a/website/docs/r/cloud_provider_snapshot.html.markdown b/website/docs/r/cloud_provider_snapshot.html.markdown index adecd6d2ac..6d7f756632 100644 --- a/website/docs/r/cloud_provider_snapshot.html.markdown +++ b/website/docs/r/cloud_provider_snapshot.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot" -sidebar_current: "docs-mongodbatlas-resource-cloud_provider_snapshot" -description: |- - Provides an Cloud Backup Snapshot resource. 
+subcategory: "Deprecated" --- **WARNING:** This resource is deprecated, use `mongodbatlas_cloud_backup_snapshot` diff --git a/website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown b/website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown index 70c6e07cd1..91c057cb2a 100644 --- a/website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown +++ b/website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot_backup_policy" -sidebar_current: "docs-mongodbatlas-resource-cloud-provider-snapshot-backup-policy" -description: |- - Provides a Cloud Backup Snapshot Policy resource. +subcategory: "Deprecated" --- **WARNING:** This resource is deprecated, use `mongodbatlas_cloud_backup_schedule` diff --git a/website/docs/r/cloud_provider_snapshot_restore_job.html.markdown b/website/docs/r/cloud_provider_snapshot_restore_job.html.markdown index 218dae9e61..5df4d1b533 100644 --- a/website/docs/r/cloud_provider_snapshot_restore_job.html.markdown +++ b/website/docs/r/cloud_provider_snapshot_restore_job.html.markdown @@ -1,9 +1,5 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cloud_provider_snapshot_restore_job" -sidebar_current: "docs-mongodbatlas-resource-cloud_provider_snapshot_restore_job" -description: |- - Provides a Cloud Backup Snapshot Restore Job resource. +subcategory: "Deprecated" --- **WARNING:** This resource is deprecated, use `mongodbatlas_cloud_backup_snapshot_restore_job` diff --git a/website/docs/r/cluster.html.markdown b/website/docs/r/cluster.html.markdown index 58778f380f..9faad9b92f 100644 --- a/website/docs/r/cluster.html.markdown +++ b/website/docs/r/cluster.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cluster" -sidebar_current: "docs-mongodbatlas-resource-cluster" -description: |- - Provides a Cluster resource. 
---- - # Resource: mongodbatlas_cluster `mongodbatlas_cluster` provides a Cluster resource. The resource lets you create, edit and delete clusters. The resource requires your Project ID. diff --git a/website/docs/r/cluster_outage_simulation.html.markdown b/website/docs/r/cluster_outage_simulation.html.markdown index 0410e4b1e3..ee2a5bc3d3 100644 --- a/website/docs/r/cluster_outage_simulation.html.markdown +++ b/website/docs/r/cluster_outage_simulation.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: cluster_outage_simulation" -sidebar_current: "docs-mongodbatlas-resource-federated-database-instance" -description: |- - Provides a Cluster Outage Simulation resource. ---- - # Resource: mongodbatlas_cluster_outage_simulation `mongodbatlas_cluster_outage_simulation` provides a Cluster Outage Simulation resource. For more details see https://www.mongodb.com/docs/atlas/tutorial/test-resilience/simulate-regional-outage/ diff --git a/website/docs/r/custom_db_role.html.markdown b/website/docs/r/custom_db_role.html.markdown index abe09dea92..9125e1fb80 100644 --- a/website/docs/r/custom_db_role.html.markdown +++ b/website/docs/r/custom_db_role.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: custom_db_role" -sidebar_current: "docs-mongodbatlas-resource-custom-db-role" -description: |- - Provides a Custom DB Role resource. ---- - # Resource: mongodbatlas_custom_db_role `mongodbatlas_custom_db_role` provides a Custom DB Role resource. The customDBRoles resource lets you retrieve, create and modify the custom MongoDB roles in your cluster. Use custom MongoDB roles to specify custom sets of actions which cannot be described by the built-in Atlas database user privileges. 
diff --git a/website/docs/r/custom_dns_configuration_cluster_aws.markdown b/website/docs/r/custom_dns_configuration_cluster_aws.markdown index 083932cd28..b9337d9060 100644 --- a/website/docs/r/custom_dns_configuration_cluster_aws.markdown +++ b/website/docs/r/custom_dns_configuration_cluster_aws.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: custom_dns_configuration_cluster_aws" -sidebar_current: "docs-mongodbatlas-resource-custom_dns_configuration_cluster_aws" -description: |- - Provides a Custom DNS Configuration for Atlas Clusters on AWS resource. ---- - # Resource: mongodbatlas_custom_dns_configuration_cluster_aws `mongodbatlas_custom_dns_configuration_cluster_aws` provides a Custom DNS Configuration for Atlas Clusters on AWS resource. This represents a Custom DNS Configuration for Atlas Clusters on AWS that can be updated in an Atlas project. diff --git a/website/docs/r/data_lake_pipeline.html.markdown b/website/docs/r/data_lake_pipeline.html.markdown index cd531ce932..a32acac567 100644 --- a/website/docs/r/data_lake_pipeline.html.markdown +++ b/website/docs/r/data_lake_pipeline.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: data_lake" -sidebar_current: "docs-mongodbatlas-resource-data-lake" -description: |- - Provides a Data Lake Pipeline resource. ---- - # Resource: mongodbatlas_data_lake_pipeline `mongodbatlas_data_lake_pipeline` provides a Data Lake Pipeline resource. diff --git a/website/docs/r/database_user.html.markdown b/website/docs/r/database_user.html.markdown index 88f39adbbb..8b812f1b2d 100644 --- a/website/docs/r/database_user.html.markdown +++ b/website/docs/r/database_user.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: database_user" -sidebar_current: "docs-mongodbatlas-resource-database-user" -description: |- - Provides a Database User resource. 
---- - # Resource: mongodbatlas_database_user `mongodbatlas_database_user` provides a Database User resource. This represents a database user which will be applied to all clusters within the project. diff --git a/website/docs/r/encryption_at_rest.html.markdown b/website/docs/r/encryption_at_rest.html.markdown index 9d7acde8c3..37e711aa56 100644 --- a/website/docs/r/encryption_at_rest.html.markdown +++ b/website/docs/r/encryption_at_rest.html.markdown @@ -1,14 +1,6 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: encryption_at_rest" -sidebar_current: "docs-mongodbatlas-resource-encryption_at_rest" -description: |- - Provides an Encryption At Rest resource. ---- - # Resource: mongodbatlas_encryption_at_rest -`mongodbatlas_encryption_at_rest` Allows management of encryption at rest for an Atlas project with one of the following providers: +`mongodbatlas_encryption_at_rest` allows management of encryption at rest for an Atlas project with one of the following providers: [Amazon Web Services Key Management Service](https://docs.atlas.mongodb.com/security-aws-kms/#security-aws-kms) [Azure Key Vault](https://docs.atlas.mongodb.com/security-azure-kms/#security-azure-kms) diff --git a/website/docs/r/event_trigger.html.markdown b/website/docs/r/event_trigger.html.markdown index bb7f1146ad..3a310b6bd2 100644 --- a/website/docs/r/event_trigger.html.markdown +++ b/website/docs/r/event_trigger.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: event_trigger" -sidebar_current: "docs-mongodbatlas-resource-event-trigger" -description: |- - Provides a Event Trigger resource. ---- - # Resource: mongodbatlas_event_trigger `mongodbatlas_event_trigger` provides a Event Trigger resource. 
diff --git a/website/docs/r/federated_database_instance.html.markdown b/website/docs/r/federated_database_instance.html.markdown index bff5078e0a..b69cad0a9d 100644 --- a/website/docs/r/federated_database_instance.html.markdown +++ b/website/docs/r/federated_database_instance.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: federated_database_instance" -sidebar_current: "docs-mongodbatlas-resource-federated-database-instance" -description: |- - Provides a Federated Database Instance resource. ---- - # Resource: mongodbatlas_federated_database_instance `mongodbatlas_federated_database_instance` provides a Federated Database Instance resource. diff --git a/website/docs/r/federated_query_limit.html.markdown b/website/docs/r/federated_query_limit.html.markdown index 333ef7c1e1..de011327c2 100644 --- a/website/docs/r/federated_query_limit.html.markdown +++ b/website/docs/r/federated_query_limit.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: federated_database_query_limit" -sidebar_current: "docs-mongodbatlas-resource-federated-query-limit" -description: |- - Provides a Federated Database Instance Query Limit. ---- - # Resource: mongodbatlas_federated_query_limit `mongodbatlas_federated_query_limit` provides a Federated Database Instance Query Limits resource. To learn more about Atlas Data Federation see https://www.mongodb.com/docs/atlas/data-federation/overview/. 
diff --git a/website/docs/r/federated_settings_identity_provider.html.markdown b/website/docs/r/federated_settings_identity_provider.html.markdown index 9be3d2d0a3..cef768d251 100644 --- a/website/docs/r/federated_settings_identity_provider.html.markdown +++ b/website/docs/r/federated_settings_identity_provider.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_identity_provider" -sidebar_current: "docs-mongodbatlas-federated-settings-identity-provider" -description: |- - Provides a federated settings Identity Provider resource. ---- - # Resource: mongodbatlas_federated_settings_identity_provider `mongodbatlas_federated_settings_identity_provider` provides an Atlas federated settings identity provider resource provides a subset of settings to be maintained post import of the existing resource. diff --git a/website/docs/r/federated_settings_org_config.html.markdown b/website/docs/r/federated_settings_org_config.html.markdown index e025b5747d..924c5a3252 100644 --- a/website/docs/r/federated_settings_org_config.html.markdown +++ b/website/docs/r/federated_settings_org_config.html.markdown @@ -1,16 +1,7 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_org_config" -sidebar_current: "docs-mongodbatlas-resource-federated-settings-org-config" -description: |- - Provides a federated settings Organization Configuration. ---- - # Resource: mongodbatlas_federated_settings_org_config `mongodbatlas_federated_settings_org_config` provides an Federated Settings Identity Providers datasource. Atlas Cloud Federated Settings Identity Providers provides federated settings outputs for the configured Identity Providers. - ## Example Usage ~> **IMPORTANT** You **MUST** import this resource before you can manage it with this provider. 
diff --git a/website/docs/r/federated_settings_org_role_mapping.html.markdown b/website/docs/r/federated_settings_org_role_mapping.html.markdown index 9087a55471..e54277adb1 100644 --- a/website/docs/r/federated_settings_org_role_mapping.html.markdown +++ b/website/docs/r/federated_settings_org_role_mapping.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_federated_settings_org_role_mapping" -sidebar_current: "docs-mongodbatlas-resource-federated-settings-org-role-mapping" -description: |- - Provides a federated settings Role Mapping resource. ---- - # Resource: mongodbatlas_federated_settings_org_role_mapping `mongodbatlas_federated_settings_org_role_mapping` provides an Role Mapping resource. This allows organization role mapping to be created. diff --git a/website/docs/r/global_cluster_config.html.markdown b/website/docs/r/global_cluster_config.html.markdown index 18fe29f942..313e5943e4 100644 --- a/website/docs/r/global_cluster_config.html.markdown +++ b/website/docs/r/global_cluster_config.html.markdown @@ -1,16 +1,7 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: global_cluster_config" -sidebar_current: "docs-mongodbatlas-resource-global-cluster-config" -description: |- - Provides a Global Cluster Configuration resource. ---- - # Resource: mongodbatlas_global_cluster_config `mongodbatlas_global_cluster_config` provides a Global Cluster Configuration resource. - -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. -> **NOTE:** This resource can only be used with Atlas-managed clusters. See doc for `global_cluster_self_managed_sharding` attribute in [`mongodbatlas_advanced_cluster` resource](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster) for more info. 
diff --git a/website/docs/r/ldap_configuration.html.markdown b/website/docs/r/ldap_configuration.html.markdown index 553ea8574d..f0d6d527df 100644 --- a/website/docs/r/ldap_configuration.html.markdown +++ b/website/docs/r/ldap_configuration.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: ldap-configuration" -sidebar_current: "docs-mongodbatlas-resource-ldap-configuration" -description: |- - Provides a LDAP Configuration resource. ---- - # Resource: mongodbatlas_ldap_configuration `mongodbatlas_ldap_configuration` provides an LDAP Configuration resource. This allows an LDAP configuration for an Atlas project to be crated and managed. This endpoint doesn’t verify connectivity using the provided LDAP over TLS configuration details. To verify a configuration before saving it, use the resource to [verify](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/INTMDB-114/website/docs/r/ldap_verify.html.markdown) the LDAP configuration. diff --git a/website/docs/r/ldap_verify.html.markdown b/website/docs/r/ldap_verify.html.markdown index 0109936e31..4bb9530b2b 100644 --- a/website/docs/r/ldap_verify.html.markdown +++ b/website/docs/r/ldap_verify.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: ldap-verify" -sidebar_current: "docs-mongodbatlas-resource-ldap-verify" -description: |- - Provides a LDAP Verify resource. ---- - # Resource: mongodbatlas_ldap_verify `mongodbatlas_ldap_verify` provides an LDAP Verify resource. This allows a a verification of an LDAP configuration over TLS for an Atlas project. Atlas retains only the most recent request for each project. 
diff --git a/website/docs/r/maintenance_window.html.markdown b/website/docs/r/maintenance_window.html.markdown index 75d5627ea3..463ea4c97e 100644 --- a/website/docs/r/maintenance_window.html.markdown +++ b/website/docs/r/maintenance_window.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: maintenance_window" -sidebar_current: "docs-mongodbatlas-resource-maintenance_window" -description: |- - Provides an Maintenance Window resource. ---- - # Resource: mongodbatlas_maintenance_window `mongodbatlas_maintenance_window` provides a resource to schedule the maintenance window for your MongoDB Atlas Project and/or set to defer a scheduled maintenance up to two times. Please refer to [Maintenance Windows](https://www.mongodb.com/docs/atlas/tutorial/cluster-maintenance-window/#configure-maintenance-window) documentation for more details. diff --git a/website/docs/r/network_container.html.markdown b/website/docs/r/network_container.html.markdown index c0d0da72cf..f43e35c89b 100644 --- a/website/docs/r/network_container.html.markdown +++ b/website/docs/r/network_container.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: network_container" -sidebar_current: "docs-mongodbatlas-resource-network-container" -description: |- - Provides a Network Peering resource. ---- - # Resource: mongodbatlas_network_container `mongodbatlas_network_container` provides a Network Peering Container resource. The resource lets you create, edit and delete network peering containers. You must delete network peering containers before creating clusters in your project. You can't delete a network peering container if your project contains clusters. The resource requires your Project ID. Each cloud provider requires slightly different attributes so read the argument reference carefully. 
diff --git a/website/docs/r/network_peering.html.markdown b/website/docs/r/network_peering.html.markdown index a7f211f03e..b62d67e2de 100644 --- a/website/docs/r/network_peering.html.markdown +++ b/website/docs/r/network_peering.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: network_peering" -sidebar_current: "docs-mongodbatlas-resource-network-peering" -description: |- - Provides a Network Peering resource. ---- - # Resource: mongodbatlas_network_peering `mongodbatlas_network_peering` provides a Network Peering Connection resource. The resource lets you create, edit and delete network peering connections. The resource requires your Project ID. diff --git a/website/docs/r/online_archive.html.markdown b/website/docs/r/online_archive.html.markdown index 96e3dd5875..0a8f3a86d0 100644 --- a/website/docs/r/online_archive.html.markdown +++ b/website/docs/r/online_archive.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_online_archive" -sidebar_current: "docs-mongodbatlas-resource-online-archive" -description: |- - Provides a Online Archive resource for creation, update, and delete ---- - # Resource: mongodbatlas_online_archive `mongodbatlas_online_archive` resource provides access to create, edit, pause and resume an online archive for a collection. diff --git a/website/docs/r/org_invitation.html.markdown b/website/docs/r/org_invitation.html.markdown index 073a19ee19..82608ee0de 100644 --- a/website/docs/r/org_invitation.html.markdown +++ b/website/docs/r/org_invitation.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: org_invitation" -sidebar_current: "docs-mongodbatlas-resource-organization-invitation" -description: |- - Provides an Atlas Organization Invitation resource. ---- - # Resource: mongodbatlas_org_invitation `mongodbatlas_org_invitation` invites a user to join an Atlas organization. 
diff --git a/website/docs/r/organization.html.markdown b/website/docs/r/organization.html.markdown index ca4c602031..8e99c2aad6 100644 --- a/website/docs/r/organization.html.markdown +++ b/website/docs/r/organization.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: organization" -sidebar_current: "docs-mongodbatlas-resource-organization" -description: |- - Provides a Organization resource. ---- - # Resource: mongodbatlas_organization `mongodbatlas_organization` provides programmatic management (including creation) of a MongoDB Atlas Organization resource. diff --git a/website/docs/r/private_endpoint_regional_mode.html.markdown b/website/docs/r/private_endpoint_regional_mode.html.markdown index 54c9109130..b4d2ae9ea1 100644 --- a/website/docs/r/private_endpoint_regional_mode.html.markdown +++ b/website/docs/r/private_endpoint_regional_mode.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: private_endpoint_regional_mode" -sidebar_current: "docs-mongodbatlas-resource-private_endpoint_regional_mode" -description: |- - Provides a Private Endpoint Regional Mode resource ---- - # Resource: private_endpoint_regional_mode `mongodbatlas_private_endpoint_regional_mode` provides a Private Endpoint Regional Mode resource. This represents a regionalized private endpoint setting for a Project. Enable it to allow region specific private endpoints. diff --git a/website/docs/r/privatelink_endpoint.html.markdown b/website/docs/r/privatelink_endpoint.html.markdown index 212fda2c8d..0b4eb81665 100644 --- a/website/docs/r/privatelink_endpoint.html.markdown +++ b/website/docs/r/privatelink_endpoint.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: private_endpoint" -sidebar_current: "docs-mongodbatlas-resource-private_endpoint" -description: |- - Provides a Private Endpoint resource. 
---- - # Resource: mongodbatlas_privatelink_endpoint `mongodbatlas_privatelink_endpoint` provides a Private Endpoint resource. This represents a [Private Endpoint Service](https://www.mongodb.com/docs/atlas/security-private-endpoint/#private-endpoint-concepts) that can be created in an Atlas project. diff --git a/website/docs/r/privatelink_endpoint_serverless.html.markdown b/website/docs/r/privatelink_endpoint_serverless.html.markdown index d9b661810c..d5edd9dc3e 100644 --- a/website/docs/r/privatelink_endpoint_serverless.html.markdown +++ b/website/docs/r/privatelink_endpoint_serverless.html.markdown @@ -1,12 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: privatelink_endpoint_serverless" -sidebar_current: "docs-mongodbatlas-datasource-privatelink-endpoint-serverless" -description: |- -Describes a Serverless PrivateLink Endpoint ---- - - # Resource: privatelink_endpoint_serverless `privatelink_endpoint_serverless` Provides a Serverless PrivateLink Endpoint resource. diff --git a/website/docs/r/privatelink_endpoint_service.html.markdown b/website/docs/r/privatelink_endpoint_service.html.markdown index 7e5109c38d..c0bf2b960c 100644 --- a/website/docs/r/privatelink_endpoint_service.html.markdown +++ b/website/docs/r/privatelink_endpoint_service.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: private_endpoint_link" -sidebar_current: "docs-mongodbatlas-resource-private_endpoint_interface_link" -description: |- - Provides a Private Endpoint Link resource. ---- - # Resource: mongodbatlas_privatelink_endpoint_service `mongodbatlas_privatelink_endpoint_service` provides a Private Endpoint Interface Link resource. This represents a Private Endpoint Interface Link, which adds one [Interface Endpoint](https://www.mongodb.com/docs/atlas/security-private-endpoint/#private-endpoint-concepts) to a private endpoint connection in an Atlas project. 
diff --git a/website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown b/website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown index 8de1a15694..1e6dfe1022 100644 --- a/website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown +++ b/website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_privatelink_endpoint_service_data_federation_online_archive" -sidebar_current: "docs-mongodbatlas-resource-privatelink-endpoint-service-data-federation-online-archive" -description: |- - Provides a Privatelink Endpoint Service Data Federation Online Archive resource. ---- - # Resource: mongodbatlas_privatelink_endpoint_service_data_federation_online_archive `mongodbatlas_privatelink_endpoint_service_data_federation_online_archive` provides a Private Endpoint Service resource for Data Federation and Online Archive. The resource allows you to create and manage a private endpoint for Federated Database Instances and Online Archives to the specified project. diff --git a/website/docs/r/privatelink_endpoint_service_serverless.html.markdown b/website/docs/r/privatelink_endpoint_service_serverless.html.markdown index 483e453c78..c541969aa0 100644 --- a/website/docs/r/privatelink_endpoint_service_serverless.html.markdown +++ b/website/docs/r/privatelink_endpoint_service_serverless.html.markdown @@ -1,12 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: privatelink_endpoint_service_serverless" -sidebar_current: "docs-mongodbatlas-datasource-privatelink-endpoint-service-serverless" -description: |- -Describes a Serverless PrivateLink Endpoint Service ---- - - # Resource: privatelink_endpoint_service_serverless `privatelink_endpoint_service_serverless` Provides a Serverless PrivateLink Endpoint Service resource. 
diff --git a/website/docs/r/project.html.markdown b/website/docs/r/project.html.markdown index 3ea692dc19..66c6eea61d 100644 --- a/website/docs/r/project.html.markdown +++ b/website/docs/r/project.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project" -sidebar_current: "docs-mongodbatlas-resource-project" -description: |- - Provides a Project resource. ---- - # Resource: mongodbatlas_project `mongodbatlas_project` provides a Project resource. This allows project to be created. diff --git a/website/docs/r/project_api_key.html.markdown b/website/docs/r/project_api_key.html.markdown index c9b4069193..6404d3d0b9 100644 --- a/website/docs/r/project_api_key.html.markdown +++ b/website/docs/r/project_api_key.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project_api_key" -sidebar_current: "docs-mongodbatlas-resource-project-api-key" -description: |- - Creates and assigns the specified Atlas Organization API Key to the specified Project. Users with the Project Owner role in the project associated with the API key can use the organization API key to access the resources. ---- - # Resource: mongodbatlas_project_api_key `mongodbatlas_project_api_key` provides a Project API Key resource. This allows project API Key to be created. diff --git a/website/docs/r/project_invitation.html.markdown b/website/docs/r/project_invitation.html.markdown index 309b3cee0a..06ae619a4c 100644 --- a/website/docs/r/project_invitation.html.markdown +++ b/website/docs/r/project_invitation.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project_invitation" -sidebar_current: "docs-mongodbatlas-resource-project-invitation" -description: |- - Provides an Atlas Project Invitation resource. ---- - # Resource: mongodbatlas_project_invitation `mongodbatlas_project_invitation` invites a user to join an Atlas project. 
diff --git a/website/docs/r/project_ip_access_list.html.markdown b/website/docs/r/project_ip_access_list.html.markdown index 05f7a4aa74..5566f23b43 100644 --- a/website/docs/r/project_ip_access_list.html.markdown +++ b/website/docs/r/project_ip_access_list.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: project_ip_access_list" -sidebar_current: "docs-mongodbatlas-resource-project-ip-access-list" -description: |- - Provides an IP Access List resource. ---- - # Resource: mongodbatlas_project_ip_access_list `mongodbatlas_project_ip_access_list` provides an IP Access List entry resource. The access list grants access from IPs, CIDRs or AWS Security Groups (if VPC Peering is enabled) to clusters within the Project. diff --git a/website/docs/r/push_based_log_export.html.markdown b/website/docs/r/push_based_log_export.html.markdown index 9db32f3c20..591a0b1d34 100644 --- a/website/docs/r/push_based_log_export.html.markdown +++ b/website/docs/r/push_based_log_export.html.markdown @@ -1,14 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_push_based_log_export" -sidebar_current: "docs-mongodbatlas-resource-push-based-log-export" -description: |- - "Provides resource for push-based log export feature." ---- - # Resource: mongodbatlas_push_based_log_export - `mongodbatlas_push_based_log_export` provides a resource for push-based log export feature. The resource lets you configure, enable & disable the project level settings for the push-based log export feature. Using this resource you can continually push logs from mongod, mongos, and audit logs to an Amazon S3 bucket. Atlas exports logs every 5 minutes. 
diff --git a/website/docs/r/search_deployment.html.markdown b/website/docs/r/search_deployment.html.markdown index ea30b6d3c5..7fb1a1e02e 100644 --- a/website/docs/r/search_deployment.html.markdown +++ b/website/docs/r/search_deployment.html.markdown @@ -1,14 +1,5 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: mongodbatlas_search_deployment" -sidebar_current: "docs-mongodbatlas-resource-search-deployment" -description: |- - "Provides a Search Deployment resource." ---- - # Resource: mongodbatlas_search_deployment - `mongodbatlas_search_deployment` provides a Search Deployment resource. The resource lets you create, edit and delete dedicated search nodes in a cluster. -> **NOTE:** For details on supported cloud providers and existing limitations you can visit the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-nodes-for-workload-isolation). diff --git a/website/docs/r/search_index.html.markdown b/website/docs/r/search_index.html.markdown index b695c7ca1c..87dc3e9f19 100644 --- a/website/docs/r/search_index.html.markdown +++ b/website/docs/r/search_index.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: search index" -sidebar_current: "docs-mongodbatlas-resource-search-index" -description: |- - Provides a Search Index resource. ---- - # Resource: mongodbatlas_search_index `mongodbatlas_search_index` provides a Search Index resource. This allows indexes to be created. diff --git a/website/docs/r/serverless_instance.html.markdown b/website/docs/r/serverless_instance.html.markdown index c4bff5f3a9..03b283632f 100644 --- a/website/docs/r/serverless_instance.html.markdown +++ b/website/docs/r/serverless_instance.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: serverless instance" -sidebar_current: "docs-mongodbatlas-resource-serverless-instance" -description: |- -Provides a Serverless Instance resource. 
---- - # Resource: mongodbatlas_serverless_instance `mongodbatlas_serverless_instance` provides a Serverless Instance resource. This allows serverless instances to be created. diff --git a/website/docs/r/stream_connection.html.markdown b/website/docs/r/stream_connection.html.markdown index b9d684b2dd..962ca1831f 100644 --- a/website/docs/r/stream_connection.html.markdown +++ b/website/docs/r/stream_connection.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: stream connection" -sidebar_current: "docs-mongodbatlas-resource-stream-connection" -description: |- - Provides a Stream Connection resource. ---- - # Resource: mongodbatlas_stream_connection `mongodbatlas_stream_connection` provides a Stream Connection resource. The resource lets you create, edit, and delete stream instance connections. diff --git a/website/docs/r/stream_instance.html.markdown b/website/docs/r/stream_instance.html.markdown index 35b51e7640..149de90b8e 100644 --- a/website/docs/r/stream_instance.html.markdown +++ b/website/docs/r/stream_instance.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: stream instance" -sidebar_current: "docs-mongodbatlas-resource-stream-instance" -description: |- - Provides a Stream Instance resource. ---- - # Resource: mongodbatlas_stream_instance `mongodbatlas_stream_instance` provides a Stream Instance resource. The resource lets you create, edit, and delete stream instances in a project. diff --git a/website/docs/r/team.html.markdown b/website/docs/r/team.html.markdown index dd58ab1f14..5b7a0e7368 100644 --- a/website/docs/r/team.html.markdown +++ b/website/docs/r/team.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: team" -sidebar_current: "docs-mongodbatlas-resource-team" -description: |- - Provides a Team resource. ---- - # Resource: mongodbatlas_team `mongodbatlas_team` provides a Team resource. 
The resource lets you create, edit and delete Teams. Also, Teams can be assigned to multiple projects, and team members’ access to the project is determined by the team’s project role. diff --git a/website/docs/r/teams.html.markdown b/website/docs/r/teams.html.markdown index 25f7498a5b..5db231b8c1 100644 --- a/website/docs/r/teams.html.markdown +++ b/website/docs/r/teams.html.markdown @@ -1,11 +1,9 @@ --- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: teams" -sidebar_current: "docs-mongodbatlas-resource-teams" -description: |- - Provides a Team resource. +subcategory: "Deprecated" --- +**WARNING:** This resource is deprecated, use `mongodbatlas_team` + # Resource: mongodbatlas_teams This resource is deprecated. Please transition to using `mongodbatlas_team` which defines the same underlying implementation, aligning the name of the resource with the implementation which manages a single team. diff --git a/website/docs/r/third_party_integration.markdown b/website/docs/r/third_party_integration.markdown index 6b9d003326..6f0cff660e 100644 --- a/website/docs/r/third_party_integration.markdown +++ b/website/docs/r/third_party_integration.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: third_party_integration" -sidebar_current: "docs-mongodbatlas-datasource-third-party-integration" -description: |- - Provides a Third-Party Integration Settings resource. ---- - # Resource: mongodbatlas_third_party_integration `mongodbatlas_third_party_integration` Provides a Third-Party Integration Settings for the given type. 
diff --git a/website/docs/r/x509_authentication_database_user.html.markdown b/website/docs/r/x509_authentication_database_user.html.markdown index d3024e9724..b7ff380cb4 100644 --- a/website/docs/r/x509_authentication_database_user.html.markdown +++ b/website/docs/r/x509_authentication_database_user.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "MongoDB Atlas: x509_authentication_database_user" -sidebar_current: "docs-mongodbatlas-resource-x509-authentication-database-user" -description: |- - Provides a X509 Authentication Database User resource. ---- - # Resource: mongodbatlas_x509_authentication_database_user `mongodbatlas_x509_authentication_database_user` provides a X509 Authentication Database User resource. The mongodbatlas_x509_authentication_database_user resource lets you manage MongoDB users who authenticate using X.509 certificates. You can manage these X.509 certificates or let Atlas do it for you. diff --git a/website/docs/troubleshooting.html.markdown b/website/docs/troubleshooting.html.markdown index 2dc5884aba..565f40b754 100644 --- a/website/docs/troubleshooting.html.markdown +++ b/website/docs/troubleshooting.html.markdown @@ -1,11 +1,3 @@ ---- -layout: "mongodbatlas" -page_title: "Provider: MongoDB Atlas" -sidebar_current: "docs-mongodbatlas-troubleshooting" -description: |- - The MongoDB Atlas provider is used to interact with the resources supported by MongoDB Atlas. The provider needs to be configured with the proper credentials before it can be used. 
---- - # Troubleshooting The following are some of the common issues/errors encountered when using Terraform Provider for MongoDB Atlas: From 35e2a87a5de88fe4b5eb457542981ede06f00cbc Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Tue, 16 Jul 2024 09:19:43 +0100 Subject: [PATCH 40/84] test: Refactors resource tests to use GetClusterInfo `backup_compliance_policy` (#2415) * test: Support AdvancedConfiguration, MongoDBMajorVersion, RetainBackupsEnabled, EbsVolumeType in cluster * test: refactor test to use GetClusterInfo --- .../resource_backup_compliance_policy_test.go | 103 ++++++------------ internal/testutil/acc/cluster.go | 10 +- internal/testutil/acc/config_formatter.go | 39 ++++++- .../testutil/acc/config_formatter_test.go | 43 ++++++-- 4 files changed, 105 insertions(+), 90 deletions(-) diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go index bd0c81dc53..fc97eae941 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go @@ -14,8 +14,9 @@ import ( ) const ( - resourceName = "mongodbatlas_backup_compliance_policy.backup_policy_res" - dataSourceName = "data.mongodbatlas_backup_compliance_policy.backup_policy" + resourceName = "mongodbatlas_backup_compliance_policy.backup_policy_res" + dataSourceName = "data.mongodbatlas_backup_compliance_policy.backup_policy" + projectIDTerraform = "mongodbatlas_project.test.id" ) func TestAccBackupCompliancePolicy_basic(t *testing.T) { @@ -61,6 +62,20 @@ func TestAccBackupCompliancePolicy_overwriteBackupPolicies(t *testing.T) { orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid conflicts with backup compliance policy projectOwnerID = os.Getenv("MONGODB_ATLAS_PROJECT_OWNER_ID") + req = acc.ClusterRequest{ + 
AdvancedConfiguration: map[string]any{ + acc.ClusterAdvConfigOplogMinRetentionHours: 8, + }, + ProjectID: projectIDTerraform, + MongoDBMajorVersion: "6.0", + CloudBackup: true, + DiskSizeGb: 12, + RetainBackupsEnabled: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {EbsVolumeType: "STANDARD", AutoScalingDiskGbEnabled: true, NodeCount: 3}, + }, + } + clusterInfo = acc.GetClusterInfo(t, &req) ) resource.ParallelTest(t, resource.TestCase{ @@ -68,10 +83,10 @@ func TestAccBackupCompliancePolicy_overwriteBackupPolicies(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configClusterWithBackupSchedule(projectName, orgID, projectOwnerID), + Config: configClusterWithBackupSchedule(projectName, orgID, projectOwnerID, &clusterInfo), }, { - Config: configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectOwnerID), + Config: configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectOwnerID, &clusterInfo), ExpectError: regexp.MustCompile(`BACKUP_POLICIES_NOT_MEETING_BACKUP_COMPLIANCE_POLICY_REQUIREMENTS`), }, }, @@ -324,39 +339,11 @@ func configWithoutRestoreDays(projectName, orgID, projectOwnerID string) string ` } -func configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectOwnerID string) string { - return acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false) + ` - resource "mongodbatlas_cluster" "test" { - project_id = mongodbatlas_project.test.id - name = "test1" - provider_name = "AWS" - cluster_type = "REPLICASET" - mongo_db_major_version = "6.0" - provider_instance_size_name = "M10" - auto_scaling_compute_enabled = false - cloud_backup = true - auto_scaling_disk_gb_enabled = true - disk_size_gb = 12 - provider_volume_type = "STANDARD" - retain_backups_enabled = true - - advanced_configuration { - oplog_min_retention_hours = 8 - } - - replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - 
electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - } - +func configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectOwnerID string, info *acc.ClusterInfo) string { + return acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false) + fmt.Sprintf(` + %[1]s resource "mongodbatlas_cloud_backup_schedule" "test" { - cluster_name = mongodbatlas_cluster.test.name + cluster_name = %[2]s.name project_id = mongodbatlas_project.test.id reference_hour_of_day = 3 @@ -367,7 +354,7 @@ func configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectO cloud_provider = "AWS" frequencies = ["DAILY"] region_name = "US_WEST_1" - replication_spec_id = one(mongodbatlas_cluster.test.replication_specs).id + replication_spec_id = one(%[2]s.replication_specs).id should_copy_oplogs = false } } @@ -393,42 +380,14 @@ func configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectO retention_value = 1 } } - ` + `, info.ClusterTerraformStr, info.ClusterResourceName) } -func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string) string { - return acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false) + ` - resource "mongodbatlas_cluster" "test" { - project_id = mongodbatlas_project.test.id - name = "test1" - provider_name = "AWS" - cluster_type = "REPLICASET" - mongo_db_major_version = "6.0" - provider_instance_size_name = "M10" - auto_scaling_compute_enabled = false - cloud_backup = true - auto_scaling_disk_gb_enabled = true - disk_size_gb = 12 - provider_volume_type = "STANDARD" - retain_backups_enabled = true - - advanced_configuration { - oplog_min_retention_hours = 8 - } - - replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - } - +func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string, info *acc.ClusterInfo) string { + return 
acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false) + fmt.Sprintf(` + %[1]s resource "mongodbatlas_cloud_backup_schedule" "test" { - cluster_name = mongodbatlas_cluster.test.name + cluster_name = %[2]s.name project_id = mongodbatlas_project.test.id reference_hour_of_day = 3 @@ -439,11 +398,11 @@ func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string) cloud_provider = "AWS" frequencies = ["DAILY"] region_name = "US_WEST_1" - replication_spec_id = one(mongodbatlas_cluster.test.replication_specs).id + replication_spec_id = one(%[2]s.replication_specs).id should_copy_oplogs = false } } - ` + `, info.ClusterTerraformStr, info.ClusterResourceName) } func basicChecks() []resource.TestCheckFunc { diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index 0786d03963..c3ad33cbf7 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "go.mongodb.org/atlas-sdk/v20240530002/admin" ) @@ -13,12 +14,15 @@ type ClusterRequest struct { Tags map[string]string ProjectID string ResourceSuffix string + AdvancedConfiguration map[string]any ResourceDependencyName string ClusterName string + MongoDBMajorVersion string ReplicationSpecs []ReplicationSpecRequest DiskSizeGb int CloudBackup bool Geosharded bool + RetainBackupsEnabled bool PitEnabled bool } @@ -94,6 +98,7 @@ type ReplicationSpecRequest struct { Region string InstanceSize string ProviderName string + EbsVolumeType string ExtraRegionConfigs []ReplicationSpecRequest NodeCount int AutoScalingDiskGbEnabled bool @@ -145,8 +150,9 @@ func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { RegionName: &req.Region, ProviderName: &req.ProviderName, ElectableSpecs: &admin.HardwareSpec{ - InstanceSize: &req.InstanceSize, - 
NodeCount: &req.NodeCount, + InstanceSize: &req.InstanceSize, + NodeCount: &req.NodeCount, + EbsVolumeType: conversion.StringPtr(req.EbsVolumeType), }, AutoScaling: &admin.AdvancedAutoScalingSettings{ DiskGB: &admin.DiskGBAutoScaling{Enabled: &req.AutoScalingDiskGbEnabled}, diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index d2052ddfdb..6662fe4fd6 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -76,10 +76,18 @@ func ToSnakeCase(str string) string { return strings.ToLower(snake) } +var ( + ClusterAdvConfigOplogMinRetentionHours = "oplog_min_retention_hours" + knownAdvancedConfig = map[string]bool{ + ClusterAdvConfigOplogMinRetentionHours: true, + } +) + func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { if req == nil || req.ProjectID == "" { return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID set") } + projectID := req.ProjectID req.AddDefaults() specRequests := req.ReplicationSpecs specs := make([]admin.ReplicationSpec, len(specRequests)) @@ -98,17 +106,38 @@ func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceNa resourceType := "mongodbatlas_advanced_cluster" cluster := root.AppendNewBlock("resource", []string{resourceType, resourceSuffix}).Body() clusterRootAttributes := map[string]any{ - "project_id": req.ProjectID, - "cluster_type": clusterTypeStr, - "name": clusterName, - "backup_enabled": req.CloudBackup, - "pit_enabled": req.PitEnabled, + "cluster_type": clusterTypeStr, + "name": clusterName, + "backup_enabled": req.CloudBackup, + "pit_enabled": req.PitEnabled, + "mongo_db_major_version": req.MongoDBMajorVersion, + } + if strings.Contains(req.ProjectID, ".") { + err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID)) + if err != nil { + return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID) + } + } else 
{ + clusterRootAttributes["project_id"] = projectID } if req.DiskSizeGb != 0 { clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb } + if req.RetainBackupsEnabled { + clusterRootAttributes["retain_backups_enabled"] = req.RetainBackupsEnabled + } addPrimitiveAttributes(cluster, clusterRootAttributes) cluster.AppendNewline() + if len(req.AdvancedConfiguration) > 0 { + for _, key := range sortStringMapKeysAny(req.AdvancedConfiguration) { + if !knownAdvancedConfig[key] { + return "", "", "", fmt.Errorf("unknown key in advanced configuration: %s", key) + } + } + advancedClusterBlock := cluster.AppendNewBlock("advanced_configuration", nil).Body() + addPrimitiveAttributes(advancedClusterBlock, req.AdvancedConfiguration) + cluster.AppendNewline() + } for i, spec := range specs { err = writeReplicationSpec(cluster, spec) if err != nil { diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 2c5f7b9283..9f43d131e1 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -137,11 +137,17 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { ` var overrideClusterResource = ` resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = true - cluster_type = "GEOSHARDED" - name = "my-name" - pit_enabled = true - project_id = "project" + project_id = mongodbatlas_project.test.id + backup_enabled = true + cluster_type = "GEOSHARDED" + mongo_db_major_version = "6.0" + name = "my-name" + pit_enabled = true + retain_backups_enabled = true + + advanced_configuration { + oplog_min_retention_hours = 8 + } replication_specs { num_shards = 1 @@ -155,8 +161,9 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { disk_gb_enabled = false } electable_specs { - instance_size = "M30" - node_count = 30 + ebs_volume_type = "STANDARD" + instance_size = "M30" + node_count = 30 } } } @@ -374,9 +381,21 @@ func Test_ClusterResourceHcl(t *testing.T) { }, 
"overrideClusterResource": { overrideClusterResource, - acc.ClusterRequest{ClusterName: clusterName, Geosharded: true, PitEnabled: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ - {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE}, - }}, + acc.ClusterRequest{ + ProjectID: "mongodbatlas_project.test.id", + ClusterName: clusterName, + Geosharded: true, + CloudBackup: true, + MongoDBMajorVersion: "6.0", + RetainBackupsEnabled: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE, EbsVolumeType: "STANDARD"}, + }, + PitEnabled: true, + AdvancedConfiguration: map[string]any{ + acc.ClusterAdvConfigOplogMinRetentionHours: 8, + }, + }, }, "twoRegionConfigs": { twoRegionConfigs, @@ -403,7 +422,9 @@ func Test_ClusterResourceHcl(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { req := tc.req - req.ProjectID = "project" + if req.ProjectID == "" { + req.ProjectID = "project" + } config, actualClusterName, actualResourceName, err := acc.ClusterResourceHcl(&req) require.NoError(t, err) assert.Equal(t, "mongodbatlas_advanced_cluster.cluster_info", actualResourceName) From 3f45ccacdf651384d8c73ec786caf085d53c4122 Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Tue, 16 Jul 2024 09:22:17 +0100 Subject: [PATCH 41/84] test: Refactors resource tests to use GetClusterInfo `cluster_outage_simulation` (#2423) * test: support Priority and NodeCountReadOnly * test: Refactors resource tests to use GetClusterInfo `cluster_outage_simulation` * test: reuse test case in migration test * chore: increase timeout to ensure test is passing * test: avoid global variables to ensure no duplicate cluster names * revert delete timeout change --- ...luster_outage_simulation_migration_test.go | 54 +------- ...resource_cluster_outage_simulation_test.go | 115 ++++++++---------- 
internal/testutil/acc/cluster.go | 15 +++ internal/testutil/acc/config_formatter.go | 6 + .../testutil/acc/config_formatter_test.go | 40 ++++++ 5 files changed, 114 insertions(+), 116 deletions(-) diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go index 3a9cc5f190..3c75ffa36a 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_migration_test.go @@ -3,63 +3,13 @@ package clusteroutagesimulation_test import ( "testing" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" ) func TestMigOutageSimulationCluster_SingleRegion_basic(t *testing.T) { - var ( - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() - config = configSingleRegion(projectID, clusterName) - ) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { mig.PreCheckBasic(t) }, - CheckDestroy: checkDestroy, - Steps: []resource.TestStep{ - { - ExternalProviders: mig.ExternalProviders(), - Config: config, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "outage_filters.#"), - resource.TestCheckResourceAttrSet(resourceName, "start_request_date"), - resource.TestCheckResourceAttrSet(resourceName, "simulation_id"), - resource.TestCheckResourceAttrSet(resourceName, "state"), - ), - }, - mig.TestStepCheckEmptyPlan(config), - }, - }) + mig.CreateAndRunTest(t, singleRegionTestCase(t)) } func TestMigOutageSimulationCluster_MultiRegion_basic(t 
*testing.T) { - var ( - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() - config = configMultiRegion(projectID, clusterName) - ) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { mig.PreCheckBasic(t) }, - CheckDestroy: checkDestroy, - Steps: []resource.TestStep{ - { - ExternalProviders: mig.ExternalProviders(), - Config: config, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), - resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttrSet(resourceName, "outage_filters.#"), - resource.TestCheckResourceAttrSet(resourceName, "start_request_date"), - resource.TestCheckResourceAttrSet(resourceName, "simulation_id"), - resource.TestCheckResourceAttrSet(resourceName, "state"), - ), - }, - mig.TestStepCheckEmptyPlan(config), - }, - }) + mig.CreateAndRunTest(t, multiRegionTestCase(t)) } diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go index a1224b620e..fedf03e316 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go @@ -17,18 +17,27 @@ const ( ) func TestAccOutageSimulationCluster_SingleRegion_basic(t *testing.T) { + resource.ParallelTest(t, *singleRegionTestCase(t)) +} + +func singleRegionTestCase(t *testing.T) *resource.TestCase { + t.Helper() var ( - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() + singleRegionRequest = acc.ClusterRequest{ + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_2", InstanceSize: "M10"}, + }, + } + clusterInfo = acc.GetClusterInfo(t, &singleRegionRequest) + clusterName = clusterInfo.ClusterName ) - - resource.ParallelTest(t, resource.TestCase{ + return &resource.TestCase{ 
PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configSingleRegion(projectID, clusterName), + Config: configSingleRegion(&clusterInfo), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -46,22 +55,37 @@ func TestAccOutageSimulationCluster_SingleRegion_basic(t *testing.T) { ), }, }, - }) + } } func TestAccOutageSimulationCluster_MultiRegion_basic(t *testing.T) { + resource.ParallelTest(t, *multiRegionTestCase(t)) +} + +func multiRegionTestCase(t *testing.T) *resource.TestCase { + t.Helper() var ( - projectID = acc.ProjectIDExecution(t) - clusterName = acc.RandomClusterName() + multiRegionRequest = acc.ClusterRequest{ReplicationSpecs: []acc.ReplicationSpecRequest{ + { + Region: "US_EAST_1", + NodeCount: 3, + ExtraRegionConfigs: []acc.ReplicationSpecRequest{ + {Region: "US_EAST_2", NodeCount: 2, Priority: 6}, + {Region: "US_WEST_2", NodeCount: 2, Priority: 5, NodeCountReadOnly: 2}, + }, + }, + }} + clusterInfo = acc.GetClusterInfo(t, &multiRegionRequest) + clusterName = clusterInfo.ClusterName ) - resource.ParallelTest(t, resource.TestCase{ + return &resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configMultiRegion(projectID, clusterName), + Config: configMultiRegion(&clusterInfo), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), @@ -79,73 +103,36 @@ func TestAccOutageSimulationCluster_MultiRegion_basic(t *testing.T) { ), }, }, - }) + } } -func configSingleRegion(projectID, clusterName string) string { +func configSingleRegion(info 
*acc.ClusterInfo) string { return fmt.Sprintf(` - resource "mongodbatlas_cluster" "test" { - project_id = %[1]q - name = %[2]q - provider_name = "AWS" - provider_region_name = "US_WEST_2" - provider_instance_size_name = "M10" - } - + %[1]s resource "mongodbatlas_cluster_outage_simulation" "test_outage" { - project_id = %[1]q - cluster_name = %[2]q + project_id = %[2]q + cluster_name = %[3]q outage_filters { cloud_provider = "AWS" region_name = "US_WEST_2" } - depends_on = ["mongodbatlas_cluster.test"] + depends_on = [%[4]s] } data "mongodbatlas_cluster_outage_simulation" "test" { - project_id = %[1]q - cluster_name = %[2]q + project_id = %[2]q + cluster_name = %[3]q depends_on = [mongodbatlas_cluster_outage_simulation.test_outage] } - `, projectID, clusterName) + `, info.ClusterTerraformStr, info.ProjectID, info.ClusterName, info.ClusterResourceName) } -func configMultiRegion(projectID, clusterName string) string { +func configMultiRegion(info *acc.ClusterInfo) string { return fmt.Sprintf(` - resource "mongodbatlas_cluster" "test" { - project_id = %[1]q - name = %[2]q - cluster_type = "REPLICASET" - - provider_name = "AWS" - provider_instance_size_name = "M10" - - replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - regions_config { - region_name = "US_EAST_2" - electable_nodes = 2 - priority = 6 - read_only_nodes = 0 - } - regions_config { - region_name = "US_WEST_2" - electable_nodes = 2 - priority = 5 - read_only_nodes = 2 - } - } - } - + %[1]s resource "mongodbatlas_cluster_outage_simulation" "test_outage" { - project_id = %[1]q - cluster_name = %[2]q + project_id = %[2]q + cluster_name = %[3]q outage_filters { cloud_provider = "AWS" @@ -155,15 +142,15 @@ func configMultiRegion(projectID, clusterName string) string { cloud_provider = "AWS" region_name = "US_EAST_2" } - depends_on = ["mongodbatlas_cluster.test"] + depends_on = [%[4]s] } data 
"mongodbatlas_cluster_outage_simulation" "test" { - project_id = %[1]q - cluster_name = %[2]q + project_id = %[2]q + cluster_name = %[3]q depends_on = [mongodbatlas_cluster_outage_simulation.test_outage] } - `, projectID, clusterName) + `, info.ClusterTerraformStr, info.ProjectID, info.ClusterName, info.ClusterResourceName) } func checkDestroy(s *terraform.State) error { diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index c3ad33cbf7..ec6fbabec3 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -101,10 +101,15 @@ type ReplicationSpecRequest struct { EbsVolumeType string ExtraRegionConfigs []ReplicationSpecRequest NodeCount int + NodeCountReadOnly int + Priority int AutoScalingDiskGbEnabled bool } func (r *ReplicationSpecRequest) AddDefaults() { + if r.Priority == 0 { + r.Priority = 7 + } if r.NodeCount == 0 { r.NodeCount = 3 } @@ -146,14 +151,24 @@ func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { } func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { + req.AddDefaults() + var readOnly admin.DedicatedHardwareSpec + if req.NodeCountReadOnly != 0 { + readOnly = admin.DedicatedHardwareSpec{ + NodeCount: &req.NodeCountReadOnly, + InstanceSize: &req.InstanceSize, + } + } return admin.CloudRegionConfig{ RegionName: &req.Region, + Priority: &req.Priority, ProviderName: &req.ProviderName, ElectableSpecs: &admin.HardwareSpec{ InstanceSize: &req.InstanceSize, NodeCount: &req.NodeCount, EbsVolumeType: conversion.StringPtr(req.EbsVolumeType), }, + ReadOnlySpecs: &readOnly, AutoScaling: &admin.AdvancedAutoScalingSettings{ DiskGB: &admin.DiskGBAutoScaling{Enabled: &req.AutoScalingDiskGbEnabled}, }, diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index 6662fe4fd6..595ee4009d 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -196,6 +196,12 @@ func 
writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) er nodeSpec := rc.GetElectableSpecs() nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body() err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec) + + readOnlySpecs := rc.GetReadOnlySpecs() + if readOnlySpecs.GetNodeCount() != 0 { + readOnlyBlock := rcBlock.AppendNewBlock("read_only_specs", nil).Body() + err = addPrimitiveAttributesViaJSON(readOnlyBlock, readOnlySpecs) + } } return err } diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 9f43d131e1..88984a47f9 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -350,6 +350,38 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { value = "test" } +} +` +var readOnlyAndPriority = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 5 + provider_name = "AWS" + region_name = "US_EAST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 5 + } + read_only_specs { + instance_size = "M10" + node_count = 1 + } + } + } + } ` @@ -417,6 +449,14 @@ func Test_ClusterResourceHcl(t *testing.T) { {AutoScalingDiskGbEnabled: true}, }}, }, + "readOnlyAndPriority": { + readOnlyAndPriority, + acc.ClusterRequest{ + ClusterName: clusterName, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Priority: 5, NodeCount: 5, Region: "US_EAST_1", NodeCountReadOnly: 1}, + }}, + }, } ) for name, tc := range testCases { From f9a180922d28b1d3b5b188ace375a0d7607bb8af Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Tue, 16 Jul 2024 09:29:07 +0100 Subject: [PATCH 42/84] test: Fixes DUPLICATE_CLUSTER_NAME failures (#2424) * test: fix 
DUPLICATE_CLUSTER_NAME online_archive * test: fix DUPLICATE_CLUSTER_NAME backup_snapshot_restore_job --- ...rce_cloud_backup_snapshot_restore_job_test.go | 16 +++++++++------- .../resource_online_archive_migration_test.go | 2 +- .../resource_online_archive_test.go | 15 +++++++-------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index 25b1dea41f..e14a88c3b8 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -18,11 +18,13 @@ const ( dataSourceName = "data.mongodbatlas_cloud_backup_snapshot_restore_job.test" ) -var clusterRequest = acc.ClusterRequest{ - CloudBackup: true, - ReplicationSpecs: []acc.ReplicationSpecRequest{ - {Region: "US_WEST_2"}, - }, +func clusterRequest() *acc.ClusterRequest { + return &acc.ClusterRequest{ + CloudBackup: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_2"}, + }, + } } func TestAccCloudBackupSnapshotRestoreJob_basic(t *testing.T) { @@ -31,7 +33,7 @@ func TestAccCloudBackupSnapshotRestoreJob_basic(t *testing.T) { func TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterInfo = acc.GetClusterInfo(t, clusterRequest()) clusterName = clusterInfo.ClusterName description = fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" @@ -66,7 +68,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { var ( snapshotsDataSourceName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.test" snapshotsDataSourcePaginationName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.pagination" - clusterInfo = acc.GetClusterInfo(tb, 
&clusterRequest) + clusterInfo = acc.GetClusterInfo(tb, clusterRequest()) clusterName = clusterInfo.ClusterName description = fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" diff --git a/internal/service/onlinearchive/resource_online_archive_migration_test.go b/internal/service/onlinearchive/resource_online_archive_migration_test.go index f0edb81963..96fe9c4d1f 100644 --- a/internal/service/onlinearchive/resource_online_archive_migration_test.go +++ b/internal/service/onlinearchive/resource_online_archive_migration_test.go @@ -12,7 +12,7 @@ import ( func TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { var ( onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" - clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterInfo = acc.GetClusterInfo(t, clusterRequest()) clusterName = clusterInfo.ClusterName projectID = clusterInfo.ProjectID clusterTerraformStr = clusterInfo.ClusterTerraformStr diff --git a/internal/service/onlinearchive/resource_online_archive_test.go b/internal/service/onlinearchive/resource_online_archive_test.go index fecf5469a0..ecb3de2241 100644 --- a/internal/service/onlinearchive/resource_online_archive_test.go +++ b/internal/service/onlinearchive/resource_online_archive_test.go @@ -13,8 +13,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" ) -var ( - clusterRequest = acc.ClusterRequest{ +func clusterRequest() *acc.ClusterRequest { + return &acc.ClusterRequest{ ReplicationSpecs: []acc.ReplicationSpecRequest{ // Must use US_EAST_1 in dev for online_archive to work {AutoScalingDiskGbEnabled: true, Region: "US_EAST_1"}, @@ -23,14 +23,13 @@ var ( "ArchiveTest": "true", "Owner": "test", }, } -) - +} func TestAccBackupRSOnlineArchive(t *testing.T) { var ( onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" onlineArchivesDataSourceName = 
"data.mongodbatlas_online_archives.all" - clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterInfo = acc.GetClusterInfo(t, clusterRequest()) clusterName = clusterInfo.ClusterName projectID = clusterInfo.ProjectID clusterTerraformStr = clusterInfo.ClusterTerraformStr @@ -127,7 +126,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterInfo = acc.GetClusterInfo(t, clusterRequest()) clusterResourceName = clusterInfo.ClusterResourceName clusterName = clusterInfo.ClusterName projectID = clusterInfo.ProjectID @@ -175,7 +174,7 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { var ( onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" - clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterInfo = acc.GetClusterInfo(t, clusterRequest()) clusterResourceName = clusterInfo.ClusterResourceName clusterName = clusterInfo.ClusterName projectID = clusterInfo.ProjectID @@ -221,7 +220,7 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { func TestAccBackupRSOnlineArchiveInvalidProcessRegion(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &clusterRequest) + clusterInfo = acc.GetClusterInfo(t, clusterRequest()) clusterTerraformStr = clusterInfo.ClusterTerraformStr cloudProvider = "AWS" clusterResourceName = clusterInfo.ClusterResourceName From fa31ccff3977ffc1b2faa2f8e5bfa858647cfa5d Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Tue, 16 Jul 2024 12:28:49 +0100 Subject: [PATCH 43/84] test: Refactors GetClusterInfo (#2426) * test: support creating a datasource when using GetClusterInfo * test: Add documentation for cluster methods * refactor: move out config_cluster to its own file * refactor: move configClusterGlobal to the only usage file * refactor: remove ProjectIDStr field * test: update 
references for cluster_info fields * chore: missing whitespace * test: fix missing quotes around projectID * Update internal/testutil/acc/cluster.go Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> --------- Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> --- .../resource_backup_compliance_policy_test.go | 4 +- ...ce_cloud_backup_schedule_migration_test.go | 4 +- .../resource_cloud_backup_schedule_test.go | 84 ++-- ...ce_cloud_backup_snapshot_migration_test.go | 4 +- .../resource_cloud_backup_snapshot_test.go | 20 +- ...e_cloud_backup_snapshot_export_job_test.go | 2 +- ..._cloud_backup_snapshot_restore_job_test.go | 10 +- .../service/cluster/resource_cluster_test.go | 47 ++- ...resource_cluster_outage_simulation_test.go | 8 +- ...source_federated_database_instance_test.go | 4 +- ...ce_global_cluster_config_migration_test.go | 2 +- .../resource_global_cluster_config_test.go | 20 +- .../resource_ldap_configuration_test.go | 4 +- .../resource_online_archive_migration_test.go | 6 +- .../resource_online_archive_test.go | 22 +- ...rce_private_endpoint_regional_mode_test.go | 4 +- internal/testutil/acc/advanced_cluster.go | 45 -- internal/testutil/acc/cluster.go | 66 +-- internal/testutil/acc/config_cluster.go | 160 +++++++ internal/testutil/acc/config_cluster_test.go | 396 ++++++++++++++++++ internal/testutil/acc/config_formatter.go | 125 ------ .../testutil/acc/config_formatter_test.go | 369 ---------------- 22 files changed, 739 insertions(+), 667 deletions(-) create mode 100644 internal/testutil/acc/config_cluster.go create mode 100644 internal/testutil/acc/config_cluster_test.go diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go index fc97eae941..310793e882 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go +++ 
b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go @@ -380,7 +380,7 @@ func configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectO retention_value = 1 } } - `, info.ClusterTerraformStr, info.ClusterResourceName) + `, info.TerraformStr, info.ResourceName) } func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string, info *acc.ClusterInfo) string { @@ -402,7 +402,7 @@ func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string, should_copy_oplogs = false } } - `, info.ClusterTerraformStr, info.ClusterResourceName) + `, info.TerraformStr, info.ResourceName) } func basicChecks() []resource.TestCheckFunc { diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 2caacc8108..435f2e103a 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -30,8 +30,8 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { Config: config, Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go 
b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 66f13b6235..b2f26f32fd 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -36,7 +36,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -45,7 +45,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.#", "0"), resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.#", "0"), resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.#", "0"), - resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttrSet(dataSourceName, "reference_hour_of_day"), resource.TestCheckResourceAttrSet(dataSourceName, "reference_minute_of_hour"), resource.TestCheckResourceAttrSet(dataSourceName, "restore_window_days"), @@ -64,7 +64,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { }, true), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), 
resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -93,7 +93,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.frequency_interval", "1"), resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_unit", "years"), resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_value", "1"), - resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttrSet(dataSourceName, "reference_hour_of_day"), resource.TestCheckResourceAttrSet(dataSourceName, "reference_minute_of_hour"), resource.TestCheckResourceAttrSet(dataSourceName, "restore_window_days"), @@ -107,7 +107,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), @@ -167,7 +167,7 @@ func TestAccBackupRSCloudBackupSchedule_export(t *testing.T) { Config: configExportPolicies(&clusterInfo, policyName, roleName, bucketName), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "true"), 
resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "20"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "5"), @@ -199,7 +199,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -233,7 +233,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -259,9 +259,9 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { }, PitEnabled: true, // you cannot copy oplogs when pit is not enabled }) - clusterName = clusterInfo.ClusterName - terraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterName = clusterInfo.Name + terraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName projectID = clusterInfo.ProjectID checkMap = map[string]string{ "cluster_name": clusterName, @@ -345,7 +345,7 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - 
resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -400,7 +400,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "1"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1")), @@ -413,7 +413,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "2"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "3"), @@ -473,10 +473,10 @@ func checkDestroy(s *terraform.State) error { } func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q 
reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -485,16 +485,16 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -529,9 +529,9 @@ func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) s data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { @@ -592,10 +592,10 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp } func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d 
reference_minute_of_hour = %[4]d @@ -607,7 +607,7 @@ func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) retention_value = 1 } } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule, useYearly bool) string { @@ -622,10 +622,10 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedul ` } - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -656,16 +656,16 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedul data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) } func configAzure(info *acc.ClusterInfo, policy *admin.DiskBackupApiPolicyItem) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q policy_item_hourly { frequency_interval = %[3]d @@ -676,16 +676,16 @@ func configAzure(info *acc.ClusterInfo, policy *admin.DiskBackupApiPolicyItem) s data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = 
%[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) + `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) } func configAdvancedPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q auto_export_enabled = false reference_hour_of_day = %[3]d @@ -728,14 +728,14 @@ func configAdvancedPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSc retention_value = 1 } } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketName string) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q auto_export_enabled = true reference_hour_of_day = 20 reference_minute_of_hour = "05" @@ -775,12 +775,12 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } resource "mongodbatlas_cloud_provider_access_setup" "setup_only" { - project_id = %[2]s + project_id = %[2]q provider_name = "AWS" } resource "mongodbatlas_cloud_provider_access_authorization" "auth_role" { - project_id = %[2]s + project_id = %[2]q role_id = mongodbatlas_cloud_provider_access_setup.setup_only.role_id aws { iam_assumed_role_arn = aws_iam_role.test_role.arn @@ -788,7 +788,7 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, 
roleName, bucketNam } resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { - project_id = %[2]s + project_id = %[2]q iam_role_id = mongodbatlas_cloud_provider_access_authorization.auth_role.role_id bucket_name = aws_s3_bucket.backup.bucket cloud_provider = "AWS" @@ -837,7 +837,7 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } EOF } - `, info.ClusterNameStr, info.ProjectIDStr, policyName, roleName, bucketName) + `, info.TerraformNameRef, info.ProjectID, policyName, roleName, bucketName) } func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go index f15e1c1f93..164cab06a7 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go @@ -29,8 +29,8 @@ func TestMigBackupRSCloudBackupSnapshot_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "replicaSet"), resource.TestCheckResourceAttr(resourceName, "members.#", "0"), resource.TestCheckResourceAttr(resourceName, "snapshot_ids.#", "0"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "cloud_provider", "AWS"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "retention_in_days", retentionInDays), diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go 
b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go index b93c361c65..993eebd793 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go @@ -38,8 +38,8 @@ func TestAccBackupRSCloudBackupSnapshot_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "replicaSet"), resource.TestCheckResourceAttr(resourceName, "members.#", "0"), resource.TestCheckResourceAttr(resourceName, "snapshot_ids.#", "0"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "cloud_provider", "AWS"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "retention_in_days", retentionInDays), @@ -47,8 +47,8 @@ func TestAccBackupRSCloudBackupSnapshot_basic(t *testing.T) { resource.TestCheckResourceAttr(dataSourceName, "type", "replicaSet"), resource.TestCheckResourceAttr(dataSourceName, "members.#", "0"), resource.TestCheckResourceAttr(dataSourceName, "snapshot_ids.#", "0"), - resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(dataSourceName, "replica_set_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(dataSourceName, "replica_set_name", clusterInfo.Name), resource.TestCheckResourceAttr(dataSourceName, "cloud_provider", "AWS"), resource.TestCheckResourceAttr(dataSourceName, "description", description), resource.TestCheckResourceAttrSet(dataSourcePluralSimpleName, 
"results.#"), @@ -147,10 +147,10 @@ func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { } func configBasic(info *acc.ClusterInfo, description, retentionInDays string) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_snapshot" "test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q description = %[3]q retention_in_days = %[4]q } @@ -158,21 +158,21 @@ func configBasic(info *acc.ClusterInfo, description, retentionInDays string) str data "mongodbatlas_cloud_backup_snapshot" "test" { snapshot_id = mongodbatlas_cloud_backup_snapshot.test.snapshot_id cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } data "mongodbatlas_cloud_backup_snapshots" "test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } data "mongodbatlas_cloud_backup_snapshots" "pagination" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q page_num = 1 items_per_page = 5 } - `, info.ClusterNameStr, info.ProjectIDStr, description, retentionInDays) + `, info.TerraformNameRef, info.ProjectID, description, retentionInDays) } func configSharded(projectID, clusterName, description, retentionInDays string) string { diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go index 4b451363e5..7ebf7f5694 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go @@ -58,7 +58,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configBasic(projectID, bucketName, roleName, policyName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr), + Config: 
configBasic(projectID, bucketName, roleName, policyName, clusterInfo.TerraformNameRef, clusterInfo.TerraformStr), Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index e14a88c3b8..3f27e3a900 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -34,12 +34,12 @@ func TestAccCloudBackupSnapshotRestoreJob_basic(t *testing.T) { func TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name description = fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" useSnapshotID = true - clusterTerraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterTerraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -69,7 +69,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { snapshotsDataSourceName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.test" snapshotsDataSourcePaginationName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.pagination" clusterInfo = acc.GetClusterInfo(tb, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name description = fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" ) @@ -80,7 +80,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configBasic(clusterInfo.ClusterTerraformStr, clusterInfo.ClusterResourceName, description, 
retentionInDays), + Config: configBasic(clusterInfo.TerraformStr, clusterInfo.ResourceName, description, retentionInDays), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.automated", "true"), diff --git a/internal/service/cluster/resource_cluster_test.go b/internal/service/cluster/resource_cluster_test.go index 50dedc053c..4e891aced7 100644 --- a/internal/service/cluster/resource_cluster_test.go +++ b/internal/service/cluster/resource_cluster_test.go @@ -603,7 +603,7 @@ func TestAccCluster_Global(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: acc.ConfigClusterGlobal(orgID, projectName, clusterName), + Config: configClusterGlobal(orgID, projectName, clusterName), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), @@ -2290,6 +2290,51 @@ resource "mongodbatlas_cluster" "test" { `, projectID, name, backupEnabled, paused) } +func configClusterGlobal(orgID, projectName, clusterName string) string { + return fmt.Sprintf(` + + resource "mongodbatlas_project" "test" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_cluster" test { + project_id = mongodbatlas_project.test.id + name = %[3]q + disk_size_gb = 80 + num_shards = 1 + cloud_backup = false + cluster_type = "GEOSHARDED" + + // Provider Settings "block" + provider_name = "AWS" + provider_instance_size_name = "M30" + + replication_specs { + zone_name = "Zone 1" + num_shards = 2 + regions_config { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + + replication_specs { + zone_name = "Zone 2" + num_shards = 2 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + } + `, orgID, projectName, clusterName) +} + func TestIsMultiRegionCluster(t *testing.T) { tests := []struct { name 
string diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go index fedf03e316..cd0eb7dae5 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go @@ -29,7 +29,7 @@ func singleRegionTestCase(t *testing.T) *resource.TestCase { }, } clusterInfo = acc.GetClusterInfo(t, &singleRegionRequest) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name ) return &resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, @@ -76,7 +76,7 @@ func multiRegionTestCase(t *testing.T) *resource.TestCase { }, }} clusterInfo = acc.GetClusterInfo(t, &multiRegionRequest) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name ) return &resource.TestCase{ @@ -124,7 +124,7 @@ func configSingleRegion(info *acc.ClusterInfo) string { cluster_name = %[3]q depends_on = [mongodbatlas_cluster_outage_simulation.test_outage] } - `, info.ClusterTerraformStr, info.ProjectID, info.ClusterName, info.ClusterResourceName) + `, info.TerraformStr, info.ProjectID, info.Name, info.ResourceName) } func configMultiRegion(info *acc.ClusterInfo) string { @@ -150,7 +150,7 @@ func configMultiRegion(info *acc.ClusterInfo) string { cluster_name = %[3]q depends_on = [mongodbatlas_cluster_outage_simulation.test_outage] } - `, info.ClusterTerraformStr, info.ProjectID, info.ClusterName, info.ClusterResourceName) + `, info.TerraformStr, info.ProjectID, info.Name, info.ResourceName) } func checkDestroy(s *terraform.State) error { diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go index 8dc6667b01..7bba2984eb 100644 --- 
a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go @@ -129,7 +129,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { ResourceSuffix: "cluster2", } cluster2Info = acc.GetClusterInfo(t, &clusterRequest2) - dependencyTerraform = fmt.Sprintf("%s\n%s", clusterInfo.ClusterTerraformStr, cluster2Info.ClusterTerraformStr) + dependencyTerraform = fmt.Sprintf("%s\n%s", clusterInfo.TerraformStr, cluster2Info.TerraformStr) ) resource.ParallelTest(t, resource.TestCase{ @@ -138,7 +138,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { Steps: []resource.TestStep{ { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - Config: configWithCluster(dependencyTerraform, projectID, clusterInfo.ClusterResourceName, cluster2Info.ClusterResourceName, name), + Config: configWithCluster(dependencyTerraform, projectID, clusterInfo.ResourceName, cluster2Info.ResourceName, name), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go index c70697344c..7353bc22cd 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go @@ -27,7 +27,7 @@ func TestMigClusterRSGlobalCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.CA"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + 
resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_custom_shard_key_hashed", "false"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_shard_key_unique", "false"), diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index 68cdc51f5e..522305f543 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -31,7 +31,7 @@ func TestAccClusterRSGlobalCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.CA"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_custom_shard_key_hashed", "false"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_shard_key_unique", "false"), @@ -64,7 +64,7 @@ func TestAccClusterRSGlobalCluster_withAWSAndBackup(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.CA"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), ), }, { @@ -103,7 +103,7 @@ func 
TestAccClusterRSGlobalCluster_database(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.IE"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.DE"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), ), }, { @@ -174,10 +174,10 @@ func checkDestroy(s *terraform.State) error { } func configBasic(info *acc.ClusterInfo, isCustomShard, isShardKeyUnique bool) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_global_cluster_config" "config" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q managed_namespaces { db = "mydata" @@ -195,16 +195,16 @@ func configBasic(info *acc.ClusterInfo, isCustomShard, isShardKeyUnique bool) st data "mongodbatlas_global_cluster_config" "config" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, isCustomShard, isShardKeyUnique) + `, info.TerraformNameRef, info.ProjectID, isCustomShard, isShardKeyUnique) } func configWithDBConfig(info *acc.ClusterInfo, zones string) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_global_cluster_config" "config" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q managed_namespaces { db = "horizonv2-sg" @@ -233,7 +233,7 @@ func configWithDBConfig(info *acc.ClusterInfo, zones string) string { } %[3]s } - `, info.ClusterNameStr, info.ProjectIDStr, zones) + `, info.TerraformNameRef, info.ProjectID, zones) } const ( diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go index f9eeba0eac..5fb300be5c 100644 --- 
a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go @@ -37,7 +37,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { }, }) projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr ) resource.Test(t, resource.TestCase{ @@ -45,7 +45,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configWithVerify(clusterTerraformStr, clusterInfo.ClusterResourceName, projectID, hostname, username, password, caCertificate, cast.ToInt(port), true), + Config: configWithVerify(clusterTerraformStr, clusterInfo.ResourceName, projectID, hostname, username, password, caCertificate, cast.ToInt(port), true), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), diff --git a/internal/service/onlinearchive/resource_online_archive_migration_test.go b/internal/service/onlinearchive/resource_online_archive_migration_test.go index 96fe9c4d1f..6035a59544 100644 --- a/internal/service/onlinearchive/resource_online_archive_migration_test.go +++ b/internal/service/onlinearchive/resource_online_archive_migration_test.go @@ -13,10 +13,10 @@ func TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { var ( onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterTerraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName deleteExpirationDays = 0 ) if 
mig.IsProviderVersionAtLeast("1.12.2") { diff --git a/internal/service/onlinearchive/resource_online_archive_test.go b/internal/service/onlinearchive/resource_online_archive_test.go index ecb3de2241..5f2e95b16d 100644 --- a/internal/service/onlinearchive/resource_online_archive_test.go +++ b/internal/service/onlinearchive/resource_online_archive_test.go @@ -30,10 +30,10 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" onlineArchivesDataSourceName = "data.mongodbatlas_online_archives.all" clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterTerraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -127,11 +127,11 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterResourceName = clusterInfo.ClusterResourceName - clusterName = clusterInfo.ClusterName + clusterResourceName = clusterInfo.ResourceName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr ) resource.ParallelTest(t, resource.TestCase{ @@ -175,10 +175,10 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterResourceName = clusterInfo.ClusterResourceName - clusterName = 
clusterInfo.ClusterName + clusterResourceName = clusterInfo.ResourceName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr cloudProvider = "AWS" processRegion = "US_EAST_1" ) @@ -221,9 +221,9 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { func TestAccBackupRSOnlineArchiveInvalidProcessRegion(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr cloudProvider = "AWS" - clusterResourceName = clusterInfo.ClusterResourceName + clusterResourceName = clusterInfo.ResourceName ) resource.ParallelTest(t, resource.TestCase{ diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go index a020aa6aa8..93be48622b 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go @@ -30,13 +30,13 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { spec2 = acc.ReplicationSpecRequest{Region: "US_WEST_2", ProviderName: providerName, ZoneName: "Zone 2"} clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, DiskSizeGb: 80, ReplicationSpecs: []acc.ReplicationSpecRequest{spec1, spec2}}) projectID = clusterInfo.ProjectID - clusterResourceName = clusterInfo.ClusterResourceName + clusterResourceName = clusterInfo.ResourceName clusterDataName = "data.mongodbatlas_advanced_cluster.test" endpointResources = testConfigUnmanagedAWS( awsAccessKey, awsSecretKey, projectID, providerName, region, endpointResourceSuffix, ) clusterDataSource = modeClusterData(clusterResourceName, resourceName, 
privatelinkEndpointServiceResourceName) - dependencies = []string{clusterInfo.ClusterTerraformStr, clusterDataSource, endpointResources} + dependencies = []string{clusterInfo.TerraformStr, clusterDataSource, endpointResources} ) resource.Test(t, resource.TestCase{ diff --git a/internal/testutil/acc/advanced_cluster.go b/internal/testutil/acc/advanced_cluster.go index 31c6b27a04..45ccad7a9e 100644 --- a/internal/testutil/acc/advanced_cluster.go +++ b/internal/testutil/acc/advanced_cluster.go @@ -40,51 +40,6 @@ func CheckDestroyCluster(s *terraform.State) error { return nil } -func ConfigClusterGlobal(orgID, projectName, clusterName string) string { - return fmt.Sprintf(` - - resource "mongodbatlas_project" "test" { - org_id = %[1]q - name = %[2]q - } - - resource "mongodbatlas_cluster" test { - project_id = mongodbatlas_project.test.id - name = %[3]q - disk_size_gb = 80 - num_shards = 1 - cloud_backup = false - cluster_type = "GEOSHARDED" - - // Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M30" - - replication_specs { - zone_name = "Zone 1" - num_shards = 2 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - - replication_specs { - zone_name = "Zone 2" - num_shards = 2 - regions_config { - region_name = "US_WEST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - } - `, orgID, projectName, clusterName) -} - func ImportStateClusterIDFunc(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { rs, ok := s.RootModule().Resources[resourceName] diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index ec6fbabec3..9298e1da19 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -10,6 +10,8 @@ import ( "go.mongodb.org/atlas-sdk/v20240530002/admin" ) +// ClusterRequest contains configuration for a cluster where all fields are optional and 
AddDefaults is used for required fields. +// Used together with GetClusterInfo which will set ProjectID if it is unset. type ClusterRequest struct { Tags map[string]string ProjectID string @@ -26,6 +28,7 @@ type ClusterRequest struct { PitEnabled bool } +// AddDefaults ensures the required fields are populated to generate a resource. func (r *ClusterRequest) AddDefaults() { if r.ResourceSuffix == "" { r.ResourceSuffix = defaultClusterResourceSuffix @@ -38,49 +41,52 @@ func (r *ClusterRequest) AddDefaults() { } } +func (r *ClusterRequest) ClusterType() string { + if r.Geosharded { + return "GEOSHARDED" + } + return "REPLICASET" +} + type ClusterInfo struct { - ProjectIDStr string - ProjectID string - ClusterName string - ClusterResourceName string - ClusterNameStr string - ClusterTerraformStr string + ProjectID string + Name string + ResourceName string + TerraformNameRef string + TerraformStr string } const defaultClusterResourceSuffix = "cluster_info" // GetClusterInfo is used to obtain a project and cluster configuration resource. -// When `MONGODB_ATLAS_CLUSTER_NAME` and `MONGODB_ATLAS_PROJECT_ID` are defined, creation of resources is avoided. This is useful for local execution but not intended for CI executions. -// Clusters will be created in project ProjectIDExecution. +// When `MONGODB_ATLAS_CLUSTER_NAME` and `MONGODB_ATLAS_PROJECT_ID` are defined, a data source is created instead. This is useful for local execution but not intended for CI executions. +// Clusters will be created in project ProjectIDExecution or in req.ProjectID which can be both a direct id, e.g., `664610ec80cc36255e634074` or a config reference `mongodbatlas_project.test.id`. 
func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { tb.Helper() if req == nil { req = new(ClusterRequest) } + hclCreator := ClusterResourceHcl if req.ProjectID == "" { if ExistingClusterUsed() { projectID, clusterName := existingProjectIDClusterName() - return ClusterInfo{ - ProjectIDStr: fmt.Sprintf("%q", projectID), - ProjectID: projectID, - ClusterName: clusterName, - ClusterNameStr: fmt.Sprintf("%q", clusterName), - ClusterTerraformStr: "", - } + req.ProjectID = projectID + req.ClusterName = clusterName + hclCreator = ClusterDatasourceHcl + } else { + req.ProjectID = ProjectIDExecution(tb) } - req.ProjectID = ProjectIDExecution(tb) } - clusterTerraformStr, clusterName, clusterResourceName, err := ClusterResourceHcl(req) + clusterTerraformStr, clusterName, clusterResourceName, err := hclCreator(req) if err != nil { tb.Error(err) } return ClusterInfo{ - ProjectIDStr: fmt.Sprintf("%q", req.ProjectID), - ProjectID: req.ProjectID, - ClusterName: clusterName, - ClusterNameStr: fmt.Sprintf("%s.name", clusterResourceName), - ClusterResourceName: clusterResourceName, - ClusterTerraformStr: clusterTerraformStr, + ProjectID: req.ProjectID, + Name: clusterName, + TerraformNameRef: fmt.Sprintf("%s.name", clusterResourceName), + ResourceName: clusterResourceName, + TerraformStr: clusterTerraformStr, } } @@ -93,6 +99,9 @@ func existingProjectIDClusterName() (projectID, clusterName string) { return os.Getenv("MONGODB_ATLAS_PROJECT_ID"), os.Getenv("MONGODB_ATLAS_CLUSTER_NAME") } +// ReplicationSpecRequest can be used to customize the ReplicationSpecs of a Cluster. +// No fields are required. +// Use `ExtraRegionConfigs` to specify multiple region configs. 
type ReplicationSpecRequest struct { ZoneName string Region string @@ -128,15 +137,16 @@ func (r *ReplicationSpecRequest) AddDefaults() { } func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig { - config := CloudRegionConfig(*r) + config := cloudRegionConfig(*r) configs := []admin.CloudRegionConfig{config} - for _, extra := range r.ExtraRegionConfigs { - configs = append(configs, CloudRegionConfig(extra)) + for i := range r.ExtraRegionConfigs { + extra := r.ExtraRegionConfigs[i] + configs = append(configs, cloudRegionConfig(extra)) } return configs } -func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { +func replicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { if req == nil { req = new(ReplicationSpecRequest) } @@ -150,7 +160,7 @@ func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { } } -func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { +func cloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { req.AddDefaults() var readOnly admin.DedicatedHardwareSpec if req.NodeCountReadOnly != 0 { diff --git a/internal/testutil/acc/config_cluster.go b/internal/testutil/acc/config_cluster.go new file mode 100644 index 0000000000..2968356ff9 --- /dev/null +++ b/internal/testutil/acc/config_cluster.go @@ -0,0 +1,160 @@ +package acc + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + "go.mongodb.org/atlas-sdk/v20240530002/admin" +) + +func ClusterDatasourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { + if req == nil || req.ProjectID == "" || req.ClusterName == "" { + return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID and ClusterName set") + } + req.AddDefaults() + f := hclwrite.NewEmptyFile() + root := f.Body() + resourceType := "mongodbatlas_advanced_cluster" + resourceSuffix := req.ResourceSuffix + cluster := 
root.AppendNewBlock("data", []string{resourceType, resourceSuffix}).Body() + clusterResourceName := fmt.Sprintf("data.%s.%s", resourceType, resourceSuffix) + clusterName = req.ClusterName + clusterRootAttributes := map[string]any{ + "name": clusterName, + } + projectID := req.ProjectID + if strings.Contains(req.ProjectID, ".") { + err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID)) + if err != nil { + return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID) + } + } else { + clusterRootAttributes["project_id"] = projectID + } + addPrimitiveAttributes(cluster, clusterRootAttributes) + return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err +} + +func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { + if req == nil || req.ProjectID == "" { + return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID set") + } + projectID := req.ProjectID + req.AddDefaults() + specRequests := req.ReplicationSpecs + specs := make([]admin.ReplicationSpec, len(specRequests)) + for i := range specRequests { + specRequest := specRequests[i] + specs[i] = replicationSpec(&specRequest) + } + clusterName = req.ClusterName + resourceSuffix := req.ResourceSuffix + clusterType := req.ClusterType() + + f := hclwrite.NewEmptyFile() + root := f.Body() + resourceType := "mongodbatlas_advanced_cluster" + cluster := root.AppendNewBlock("resource", []string{resourceType, resourceSuffix}).Body() + clusterRootAttributes := map[string]any{ + "cluster_type": clusterType, + "name": clusterName, + "backup_enabled": req.CloudBackup, + "pit_enabled": req.PitEnabled, + "mongo_db_major_version": req.MongoDBMajorVersion, + } + if strings.Contains(req.ProjectID, ".") { + err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID)) + if err != nil { + return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID) + } + } else { + clusterRootAttributes["project_id"] 
= projectID + } + if req.DiskSizeGb != 0 { + clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb + } + if req.RetainBackupsEnabled { + clusterRootAttributes["retain_backups_enabled"] = req.RetainBackupsEnabled + } + addPrimitiveAttributes(cluster, clusterRootAttributes) + cluster.AppendNewline() + if len(req.AdvancedConfiguration) > 0 { + for _, key := range sortStringMapKeysAny(req.AdvancedConfiguration) { + if !knownAdvancedConfig[key] { + return "", "", "", fmt.Errorf("unknown key in advanced configuration: %s", key) + } + } + advancedClusterBlock := cluster.AppendNewBlock("advanced_configuration", nil).Body() + addPrimitiveAttributes(advancedClusterBlock, req.AdvancedConfiguration) + cluster.AppendNewline() + } + for i, spec := range specs { + err = writeReplicationSpec(cluster, spec) + if err != nil { + return "", "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) + } + } + if len(req.Tags) > 0 { + for _, key := range sortStringMapKeys(req.Tags) { + value := req.Tags[key] + tagBlock := cluster.AppendNewBlock("tags", nil).Body() + tagBlock.SetAttributeValue("key", cty.StringVal(key)) + tagBlock.SetAttributeValue("value", cty.StringVal(value)) + } + } + cluster.AppendNewline() + if req.ResourceDependencyName != "" { + if !strings.Contains(req.ResourceDependencyName, ".") { + return "", "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'") + } + err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName)) + if err != nil { + return "", "", "", err + } + } + clusterResourceName := fmt.Sprintf("%s.%s", resourceType, resourceSuffix) + return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err +} + +func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error { + replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body() + err := addPrimitiveAttributesViaJSON(replicationBlock, spec) + if err != nil { + return err + } + for _, rc := range 
spec.GetRegionConfigs() { + if rc.Priority == nil { + rc.SetPriority(7) + } + replicationBlock.AppendNewline() + rcBlock := replicationBlock.AppendNewBlock("region_configs", nil).Body() + err = addPrimitiveAttributesViaJSON(rcBlock, rc) + if err != nil { + return err + } + autoScalingBlock := rcBlock.AppendNewBlock("auto_scaling", nil).Body() + if rc.AutoScaling == nil { + autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false)) + } else { + autoScaling := rc.GetAutoScaling() + asDisk := autoScaling.GetDiskGB() + autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(asDisk.GetEnabled())) + if autoScaling.Compute != nil { + return fmt.Errorf("auto_scaling.compute is not supportd yet %v", autoScaling) + } + } + nodeSpec := rc.GetElectableSpecs() + nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body() + err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec) + + readOnlySpecs := rc.GetReadOnlySpecs() + if readOnlySpecs.GetNodeCount() != 0 { + readOnlyBlock := rcBlock.AppendNewBlock("read_only_specs", nil).Body() + err = addPrimitiveAttributesViaJSON(readOnlyBlock, readOnlySpecs) + } + } + return err +} diff --git a/internal/testutil/acc/config_cluster_test.go b/internal/testutil/acc/config_cluster_test.go new file mode 100644 index 0000000000..724e0ec8d6 --- /dev/null +++ b/internal/testutil/acc/config_cluster_test.go @@ -0,0 +1,396 @@ +package acc_test + +import ( + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var standardClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority 
= 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var overrideClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + project_id = mongodbatlas_project.test.id + backup_enabled = true + cluster_type = "GEOSHARDED" + mongo_db_major_version = "6.0" + name = "my-name" + pit_enabled = true + retain_backups_enabled = true + + advanced_configuration { + oplog_min_retention_hours = 8 + } + + replication_specs { + num_shards = 1 + zone_name = "Zone X" + + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "MY_REGION_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + ebs_volume_type = "STANDARD" + instance_size = "M30" + node_count = 30 + } + } + } + +} +` + +var dependsOnClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_project.project_execution] +} +` +var dependsOnMultiResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_private_endpoint_regional_mode.atlasrm, 
mongodbatlas_privatelink_endpoint_service.atlasple] +} +` +var twoReplicationSpecs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + replication_specs { + num_shards = 1 + zone_name = "Zone 2" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var twoRegionConfigs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` + +var autoScalingDiskEnabled = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = true + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + tags 
{ + key = "ArchiveTest" + value = "true" + } + tags { + key = "Owner" + value = "test" + } + +} +` +var readOnlyAndPriority = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 5 + provider_name = "AWS" + region_name = "US_EAST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 5 + } + read_only_specs { + instance_size = "M10" + node_count = 1 + } + } + } + +} +` + +func Test_ClusterResourceHcl(t *testing.T) { + var ( + clusterName = "my-name" + testCases = map[string]struct { + expected string + req acc.ClusterRequest + }{ + "defaults": { + standardClusterResource, + acc.ClusterRequest{ClusterName: clusterName}, + }, + "dependsOn": { + dependsOnClusterResource, + acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, + }, + "dependsOnMulti": { + dependsOnMultiResource, + acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, + }, + "twoReplicationSpecs": { + twoReplicationSpecs, + acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_1", ZoneName: "Zone 1"}, + {Region: "EU_WEST_2", ZoneName: "Zone 2"}, + }}, + }, + "overrideClusterResource": { + overrideClusterResource, + acc.ClusterRequest{ + ProjectID: "mongodbatlas_project.test.id", + ClusterName: clusterName, + Geosharded: true, + CloudBackup: true, + MongoDBMajorVersion: "6.0", + RetainBackupsEnabled: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE, EbsVolumeType: "STANDARD"}, 
+ }, + PitEnabled: true, + AdvancedConfiguration: map[string]any{ + acc.ClusterAdvConfigOplogMinRetentionHours: 8, + }, + }, + }, + "twoRegionConfigs": { + twoRegionConfigs, + acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + { + Region: "US_WEST_1", + InstanceSize: "M10", + NodeCount: 3, + ExtraRegionConfigs: []acc.ReplicationSpecRequest{{Region: "EU_WEST_1", InstanceSize: "M10", NodeCount: 3, ProviderName: constant.AWS}}, + }, + }, + }, + }, + "autoScalingDiskEnabled": { + autoScalingDiskEnabled, + acc.ClusterRequest{ClusterName: clusterName, Tags: map[string]string{ + "ArchiveTest": "true", "Owner": "test", + }, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {AutoScalingDiskGbEnabled: true}, + }}, + }, + "readOnlyAndPriority": { + readOnlyAndPriority, + acc.ClusterRequest{ + ClusterName: clusterName, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Priority: 5, NodeCount: 5, Region: "US_EAST_1", NodeCountReadOnly: 1}, + }}, + }, + } + ) + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + req := tc.req + if req.ProjectID == "" { + req.ProjectID = "project" + } + config, actualClusterName, actualResourceName, err := acc.ClusterResourceHcl(&req) + require.NoError(t, err) + assert.Equal(t, "mongodbatlas_advanced_cluster.cluster_info", actualResourceName) + assert.Equal(t, clusterName, actualClusterName) + assert.Equal(t, tc.expected, config) + }) + } +} + +var expectedDatasource = ` +data "mongodbatlas_advanced_cluster" "cluster_info" { + name = "my-datasource-cluster" + project_id = "datasource-project" +} +` + +func Test_ClusterDatasourceHcl(t *testing.T) { + expectedClusterName := "my-datasource-cluster" + config, clusterName, resourceName, err := acc.ClusterDatasourceHcl(&acc.ClusterRequest{ + ClusterName: expectedClusterName, + ProjectID: "datasource-project", + }) + require.NoError(t, err) + assert.Equal(t, "data.mongodbatlas_advanced_cluster.cluster_info", resourceName) + assert.Equal(t, 
expectedClusterName, clusterName) + assert.Equal(t, expectedDatasource, config) +} diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index 595ee4009d..6ee705c87f 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -2,7 +2,6 @@ package acc import ( "encoding/json" - "errors" "fmt" "regexp" "sort" @@ -12,7 +11,6 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" - "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func FormatToHCLMap(m map[string]string, indent, varName string) string { @@ -83,129 +81,6 @@ var ( } ) -func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { - if req == nil || req.ProjectID == "" { - return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID set") - } - projectID := req.ProjectID - req.AddDefaults() - specRequests := req.ReplicationSpecs - specs := make([]admin.ReplicationSpec, len(specRequests)) - for i, specRequest := range specRequests { - specs[i] = ReplicationSpec(&specRequest) - } - clusterName = req.ClusterName - resourceSuffix := req.ResourceSuffix - clusterTypeStr := "REPLICASET" - if req.Geosharded { - clusterTypeStr = "GEOSHARDED" - } - - f := hclwrite.NewEmptyFile() - root := f.Body() - resourceType := "mongodbatlas_advanced_cluster" - cluster := root.AppendNewBlock("resource", []string{resourceType, resourceSuffix}).Body() - clusterRootAttributes := map[string]any{ - "cluster_type": clusterTypeStr, - "name": clusterName, - "backup_enabled": req.CloudBackup, - "pit_enabled": req.PitEnabled, - "mongo_db_major_version": req.MongoDBMajorVersion, - } - if strings.Contains(req.ProjectID, ".") { - err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID)) - if err != nil { - return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID) - } - } else { - 
clusterRootAttributes["project_id"] = projectID - } - if req.DiskSizeGb != 0 { - clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb - } - if req.RetainBackupsEnabled { - clusterRootAttributes["retain_backups_enabled"] = req.RetainBackupsEnabled - } - addPrimitiveAttributes(cluster, clusterRootAttributes) - cluster.AppendNewline() - if len(req.AdvancedConfiguration) > 0 { - for _, key := range sortStringMapKeysAny(req.AdvancedConfiguration) { - if !knownAdvancedConfig[key] { - return "", "", "", fmt.Errorf("unknown key in advanced configuration: %s", key) - } - } - advancedClusterBlock := cluster.AppendNewBlock("advanced_configuration", nil).Body() - addPrimitiveAttributes(advancedClusterBlock, req.AdvancedConfiguration) - cluster.AppendNewline() - } - for i, spec := range specs { - err = writeReplicationSpec(cluster, spec) - if err != nil { - return "", "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) - } - } - if len(req.Tags) > 0 { - for _, key := range sortStringMapKeys(req.Tags) { - value := req.Tags[key] - tagBlock := cluster.AppendNewBlock("tags", nil).Body() - tagBlock.SetAttributeValue("key", cty.StringVal(key)) - tagBlock.SetAttributeValue("value", cty.StringVal(value)) - } - } - cluster.AppendNewline() - if req.ResourceDependencyName != "" { - if !strings.Contains(req.ResourceDependencyName, ".") { - return "", "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'") - } - err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName)) - if err != nil { - return "", "", "", err - } - } - clusterResourceName := fmt.Sprintf("%s.%s", resourceType, resourceSuffix) - return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err -} - -func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error { - replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body() - err := addPrimitiveAttributesViaJSON(replicationBlock, spec) - if err != nil { - return 
err - } - for _, rc := range spec.GetRegionConfigs() { - if rc.Priority == nil { - rc.SetPriority(7) - } - replicationBlock.AppendNewline() - rcBlock := replicationBlock.AppendNewBlock("region_configs", nil).Body() - err = addPrimitiveAttributesViaJSON(rcBlock, rc) - if err != nil { - return err - } - autoScalingBlock := rcBlock.AppendNewBlock("auto_scaling", nil).Body() - if rc.AutoScaling == nil { - autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false)) - } else { - autoScaling := rc.GetAutoScaling() - asDisk := autoScaling.GetDiskGB() - autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(asDisk.GetEnabled())) - if autoScaling.Compute != nil { - return fmt.Errorf("auto_scaling.compute is not supportd yet %v", autoScaling) - } - } - nodeSpec := rc.GetElectableSpecs() - nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body() - err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec) - - readOnlySpecs := rc.GetReadOnlySpecs() - if readOnlySpecs.GetNodeCount() != 0 { - readOnlyBlock := rcBlock.AppendNewBlock("read_only_specs", nil).Body() - err = addPrimitiveAttributesViaJSON(readOnlyBlock, readOnlySpecs) - } - } - return err -} - // addPrimitiveAttributesViaJSON adds "primitive" bool/string/int/float attributes of a struct. 
func addPrimitiveAttributesViaJSON(b *hclwrite.Body, obj any) error { var objMap map[string]any diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 88984a47f9..16ac5ef7f8 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -4,10 +4,8 @@ import ( "fmt" "testing" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func projectTemplateWithExtra(extra string) string { @@ -106,370 +104,3 @@ func TestFormatToHCLLifecycleIgnore(t *testing.T) { }) } } - -var standardClusterResource = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "US_WEST_2" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - -} -` -var overrideClusterResource = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - project_id = mongodbatlas_project.test.id - backup_enabled = true - cluster_type = "GEOSHARDED" - mongo_db_major_version = "6.0" - name = "my-name" - pit_enabled = true - retain_backups_enabled = true - - advanced_configuration { - oplog_min_retention_hours = 8 - } - - replication_specs { - num_shards = 1 - zone_name = "Zone X" - - region_configs { - priority = 7 - provider_name = "AZURE" - region_name = "MY_REGION_1" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - ebs_volume_type = "STANDARD" - instance_size = "M30" - node_count = 30 - } - } - } - -} -` - -var dependsOnClusterResource = ` -resource 
"mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "US_WEST_2" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - - depends_on = [mongodbatlas_project.project_execution] -} -` -var dependsOnMultiResource = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "US_WEST_2" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - - depends_on = [mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple] -} -` -var twoReplicationSpecs = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "US_WEST_1" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - replication_specs { - num_shards = 1 - zone_name = "Zone 2" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "EU_WEST_2" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - -} -` -var twoRegionConfigs = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - 
cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "US_WEST_1" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "EU_WEST_1" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - -} -` - -var autoScalingDiskEnabled = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 7 - provider_name = "AWS" - region_name = "US_WEST_2" - auto_scaling { - disk_gb_enabled = true - } - electable_specs { - instance_size = "M10" - node_count = 3 - } - } - } - tags { - key = "ArchiveTest" - value = "true" - } - tags { - key = "Owner" - value = "test" - } - -} -` -var readOnlyAndPriority = ` -resource "mongodbatlas_advanced_cluster" "cluster_info" { - backup_enabled = false - cluster_type = "REPLICASET" - name = "my-name" - pit_enabled = false - project_id = "project" - - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - - region_configs { - priority = 5 - provider_name = "AWS" - region_name = "US_EAST_1" - auto_scaling { - disk_gb_enabled = false - } - electable_specs { - instance_size = "M10" - node_count = 5 - } - read_only_specs { - instance_size = "M10" - node_count = 1 - } - } - } - -} -` - -func Test_ClusterResourceHcl(t *testing.T) { - var ( - clusterName = "my-name" - testCases = map[string]struct { - expected string - req acc.ClusterRequest - }{ - "defaults": { - standardClusterResource, - acc.ClusterRequest{ClusterName: clusterName}, - }, - "dependsOn": { 
- dependsOnClusterResource, - acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, - }, - "dependsOnMulti": { - dependsOnMultiResource, - acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, - }, - "twoReplicationSpecs": { - twoReplicationSpecs, - acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ - {Region: "US_WEST_1", ZoneName: "Zone 1"}, - {Region: "EU_WEST_2", ZoneName: "Zone 2"}, - }}, - }, - "overrideClusterResource": { - overrideClusterResource, - acc.ClusterRequest{ - ProjectID: "mongodbatlas_project.test.id", - ClusterName: clusterName, - Geosharded: true, - CloudBackup: true, - MongoDBMajorVersion: "6.0", - RetainBackupsEnabled: true, - ReplicationSpecs: []acc.ReplicationSpecRequest{ - {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE, EbsVolumeType: "STANDARD"}, - }, - PitEnabled: true, - AdvancedConfiguration: map[string]any{ - acc.ClusterAdvConfigOplogMinRetentionHours: 8, - }, - }, - }, - "twoRegionConfigs": { - twoRegionConfigs, - acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ - { - Region: "US_WEST_1", - InstanceSize: "M10", - NodeCount: 3, - ExtraRegionConfigs: []acc.ReplicationSpecRequest{{Region: "EU_WEST_1", InstanceSize: "M10", NodeCount: 3, ProviderName: constant.AWS}}, - }, - }, - }, - }, - "autoScalingDiskEnabled": { - autoScalingDiskEnabled, - acc.ClusterRequest{ClusterName: clusterName, Tags: map[string]string{ - "ArchiveTest": "true", "Owner": "test", - }, ReplicationSpecs: []acc.ReplicationSpecRequest{ - {AutoScalingDiskGbEnabled: true}, - }}, - }, - "readOnlyAndPriority": { - readOnlyAndPriority, - acc.ClusterRequest{ - ClusterName: clusterName, - ReplicationSpecs: []acc.ReplicationSpecRequest{ - {Priority: 5, 
NodeCount: 5, Region: "US_EAST_1", NodeCountReadOnly: 1}, - }}, - }, - } - ) - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - req := tc.req - if req.ProjectID == "" { - req.ProjectID = "project" - } - config, actualClusterName, actualResourceName, err := acc.ClusterResourceHcl(&req) - require.NoError(t, err) - assert.Equal(t, "mongodbatlas_advanced_cluster.cluster_info", actualResourceName) - assert.Equal(t, clusterName, actualClusterName) - assert.Equal(t, tc.expected, config) - }) - } -} From 4e3144c362c08efee67c1f6edb5ff0e3a66efa92 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Tue, 16 Jul 2024 20:30:41 +0200 Subject: [PATCH 44/84] doc: Updates to new Terraform doc structure (#2425) * move to root doc folder * rename ds and resource folders * change file extension to .md * update doc links * gitignore * releasing instructions * git hook * codeowners * workflow template * gha workflows * scripts * remove website-lint * update references to html.markdown * fix compatibility script matrix * rename rest of files * fix generate doc script using docs-out folder to temporary generate all files and copying only to docs folder the specified resource files * fix typo --- .githooks/pre-commit | 6 ---- .github/CODEOWNERS | 3 +- .github/ISSUE_TEMPLATE/Bug_Report.md | 2 +- .github/workflows/check-migration-guide.yml | 2 +- .github/workflows/code-health.yml | 12 +------- .github/workflows/notify-docs-team.yml | 2 +- .github/workflows/release.yml | 4 +-- .gitignore | 6 ---- GNUmakefile | 10 ++----- README.md | 2 +- RELEASING.md | 10 ++----- contributing/documentation.md | 2 +- .../data-sources/access_list_api_key.md | 0 .../data-sources/access_list_api_keys.md | 0 .../data-sources/advanced_cluster.md | 0 .../data-sources/advanced_clusters.md | 0 .../data-sources/alert_configuration.md | 0 .../data-sources/alert_configurations.md | 0 .../data-sources/api_key.md | 0 .../data-sources/api_keys.md | 0 
.../data-sources/atlas_user.md | 0 .../data-sources/atlas_users.md | 0 .../data-sources/auditing.md | 0 .../data-sources/backup_compliance_policy.md | 0 .../data-sources/cloud_backup_schedule.md | 0 .../data-sources/cloud_backup_snapshot.md | 0 .../cloud_backup_snapshot_export_bucket.md | 0 .../cloud_backup_snapshot_export_buckets.md | 0 .../cloud_backup_snapshot_export_job.md | 0 .../cloud_backup_snapshot_export_jobs.md | 0 .../cloud_backup_snapshot_restore_job.md | 0 .../cloud_backup_snapshot_restore_jobs.md | 0 .../data-sources/cloud_backup_snapshots.md | 0 .../cloud_provider_access_setup.md | 0 .../cloud_provider_shared_tier_restore_job.md | 0 ...cloud_provider_shared_tier_restore_jobs.md | 0 .../cloud_provider_shared_tier_snapshot.md | 0 .../cloud_provider_shared_tier_snapshots.md | 0 .../data-sources/cloud_provider_snapshot.md | 0 .../cloud_provider_snapshot_backup_policy.md | 0 .../cloud_provider_snapshot_restore_job.md | 0 .../cloud_provider_snapshot_restore_jobs.md | 0 .../data-sources/cloud_provider_snapshots.md | 0 .../data-sources/cluster.md | 0 .../data-sources/cluster_outage_simulation.md | 0 .../data-sources/clusters.md | 0 .../control_plane_ip_addresses.md | 2 +- .../data-sources/custom_db_role.md | 0 .../data-sources/custom_db_roles.md | 0 .../custom_dns_configuration_cluster_aws.md | 0 .../data-sources/data_lake_pipeline.md | 0 .../data-sources/data_lake_pipeline_run.md | 0 .../data-sources/data_lake_pipeline_runs.md | 0 .../data-sources/data_lake_pipelines.md | 0 .../data-sources/database_user.md | 0 .../data-sources/database_users.md | 0 .../data-sources/event_trigger.md | 0 .../data-sources/event_triggers.md | 0 .../federated_database_instance.md | 0 .../federated_database_instances.md | 0 .../data-sources/federated_query_limit.md | 0 .../data-sources/federated_query_limits.md | 0 .../data-sources/federated_settings.md | 0 .../federated_settings_identity_provider.md | 0 .../federated_settings_identity_providers.md | 0 
.../federated_settings_org_config.md | 0 .../federated_settings_org_configs.md | 0 .../federated_settings_org_role_mapping.md | 0 .../federated_settings_org_role_mappings.md | 0 .../data-sources/global_cluster_config.md | 0 .../data-sources/ldap_configuration.md | 0 .../data-sources/ldap_verify.md | 0 .../data-sources/maintenance_window.md | 0 .../data-sources/network_container.md | 0 .../data-sources/network_containers.md | 0 .../data-sources/network_peering.md | 0 .../data-sources/network_peerings.md | 0 .../data-sources/online_archive.md | 0 .../data-sources/online_archives.md | 0 .../data-sources/org_invitation.md | 0 .../data-sources/organization.md | 0 .../data-sources/organizations.md | 0 .../private_endpoint_regional_mode.md | 0 .../data-sources/privatelink_endpoint.md | 0 .../privatelink_endpoint_service.md | 0 ..._service_data_federation_online_archive.md | 0 ...service_data_federation_online_archives.md | 0 ...privatelink_endpoint_service_serverless.md | 0 .../privatelink_endpoints_service_adl.md | 0 ...rivatelink_endpoints_service_serverless.md | 0 .../data-sources/project.md | 0 .../data-sources/project_api_key.md | 0 .../data-sources/project_api_keys.md | 0 .../data-sources/project_invitation.md | 0 .../data-sources/project_ip_access_list.md | 0 .../data-sources/projects.md | 0 .../data-sources/push_based_log_export.md | 4 +++ .../data-sources/roles_org_id.md | 0 .../data-sources/search_deployment.md | 0 .../data-sources/search_index.md | 0 .../data-sources/search_indexes.md | 0 .../data-sources/serverless_instance.md | 0 .../data-sources/serverless_instances.md | 0 .../data-sources/stream_connection.md | 0 .../data-sources/stream_connections.md | 0 .../data-sources/stream_instance.md | 0 .../data-sources/stream_instances.md | 0 .../data-sources/team.md | 0 .../data-sources/teams.md | 0 .../data-sources/third_party_integration.md | 0 .../data-sources/third_party_integrations.md | 0 .../x509_authentication_database_user.md | 0 
.../guides/0.6.0-upgrade-guide.md | 0 .../guides/0.8.0-upgrade-guide.md | 0 .../guides/0.8.2-upgrade-guide.md | 0 .../guides/0.9.0-upgrade-guide.md | 0 .../guides/0.9.1-upgrade-guide.md | 0 .../guides/1.0.0-upgrade-guide.md | 0 .../guides/1.0.1-upgrade-guide.md | 0 .../guides/1.1.0-upgrade-guide.md | 0 .../guides/1.10.0-upgrade-guide.md | 0 .../guides/1.11.0-upgrade-guide.md | 0 .../guides/1.12.0-upgrade-guide.md | 0 .../guides/1.13.0-upgrade-guide.md | 0 .../guides/1.14.0-upgrade-guide.md | 0 .../guides/1.15.0-upgrade-guide.md | 0 .../guides/1.16.0-upgrade-guide.md | 0 .../guides/1.17.0-upgrade-guide.md | 0 .../guides/1.2.0-upgrade-guide.md | 0 .../guides/1.3.0-upgrade-guide.md | 0 .../guides/1.4.0-upgrade-guide.md | 0 .../guides/1.5.0-upgrade-guide.md | 0 .../guides/1.6.0-upgrade-guide.md | 0 .../guides/1.7.0-upgrade-guide.md | 4 +-- .../guides/1.8.0-upgrade-guide.md | 0 .../guides/1.9.0-upgrade-guide.md | 0 ...ogrammatic-API-Key-upgrade-guide-1.10.0.md | 0 .../docs/index.html.markdown => docs/index.md | 0 .../resources/access_list_api_key.md | 0 .../resources/advanced_cluster.md | 0 .../resources/alert_configuration.md | 0 .../resources/api_key.md | 0 .../resources/auditing.md | 0 .../resources/backup_compliance_policy.md | 0 .../resources/cloud_backup_schedule.md | 0 .../resources/cloud_backup_snapshot.md | 0 .../cloud_backup_snapshot_export_bucket.md | 0 .../cloud_backup_snapshot_export_job.md | 0 .../cloud_backup_snapshot_restore_job.md | 0 .../resources/cloud_provider_access.md | 0 .../resources/cloud_provider_snapshot.md | 0 .../cloud_provider_snapshot_backup_policy.md | 0 .../cloud_provider_snapshot_restore_job.md | 0 .../resources/cluster.md | 0 .../resources/cluster_outage_simulation.md | 0 .../resources/custom_db_role.md | 0 .../custom_dns_configuration_cluster_aws.md | 0 .../resources/data_lake_pipeline.md | 0 .../resources/database_user.md | 0 .../resources/encryption_at_rest.md | 0 .../resources/event_trigger.md | 0 
.../resources/federated_database_instance.md | 0 .../resources/federated_query_limit.md | 0 .../federated_settings_identity_provider.md | 0 .../federated_settings_org_config.md | 0 .../federated_settings_org_role_mapping.md | 0 .../resources/global_cluster_config.md | 0 .../resources/ldap_configuration.md | 2 +- .../resources/ldap_verify.md | 0 .../resources/maintenance_window.md | 0 .../resources/network_container.md | 0 .../resources/network_peering.md | 0 .../resources/online_archive.md | 0 .../resources/org_invitation.md | 0 .../resources/organization.md | 0 .../private_endpoint_regional_mode.md | 0 .../resources/privatelink_endpoint.md | 0 .../privatelink_endpoint_serverless.md | 0 .../resources/privatelink_endpoint_service.md | 0 ..._service_data_federation_online_archive.md | 0 ...privatelink_endpoint_service_serverless.md | 0 .../resources/project.md | 0 .../resources/project_api_key.md | 0 .../resources/project_invitation.md | 0 .../resources/project_ip_access_list.md | 0 .../resources/push_based_log_export.md | 11 ++++++- .../resources/search_deployment.md | 0 .../resources/search_index.md | 0 .../resources/serverless_instance.md | 0 .../resources/stream_connection.md | 0 .../resources/stream_instance.md | 0 .../resources/team.md | 0 .../resources/teams.md | 0 .../resources/third_party_integration.md | 0 .../x509_authentication_database_user.md | 0 .../troubleshooting.md | 0 .../global-cluster/README.md | 2 +- .../multi-cloud/README.md | 2 +- .../aws/atlas-cluster/README.md | 2 +- .../aws/multi-region-cluster/README.MD | 2 +- scripts/check-upgrade-guide-exists.sh | 2 +- scripts/generate-doc.sh | 30 ++++++++----------- scripts/update-examples-reference-in-docs.sh | 2 +- scripts/update-tf-compatibility-matrix.sh | 2 +- templates/data-source.md.tmpl | 2 -- .../control_plane_ip_addresses.md.tmpl | 3 +- .../push_based_log_export.md.tmpl | 3 +- .../data-sources/search_deployment.md.tmpl | 3 +- templates/resources.md.tmpl | 2 -- 
.../resources/push_based_log_export.md.tmpl | 3 +- templates/resources/search_deployment.md.tmpl | 3 +- 211 files changed, 56 insertions(+), 91 deletions(-) rename website/docs/d/access_list_api_key.html.markdown => docs/data-sources/access_list_api_key.md (100%) rename website/docs/d/access_list_api_keys.html.markdown => docs/data-sources/access_list_api_keys.md (100%) rename website/docs/d/advanced_cluster.html.markdown => docs/data-sources/advanced_cluster.md (100%) rename website/docs/d/advanced_clusters.html.markdown => docs/data-sources/advanced_clusters.md (100%) rename website/docs/d/alert_configuration.html.markdown => docs/data-sources/alert_configuration.md (100%) rename website/docs/d/alert_configurations.html.markdown => docs/data-sources/alert_configurations.md (100%) rename website/docs/d/api_key.html.markdown => docs/data-sources/api_key.md (100%) rename website/docs/d/api_keys.html.markdown => docs/data-sources/api_keys.md (100%) rename website/docs/d/atlas_user.html.markdown => docs/data-sources/atlas_user.md (100%) rename website/docs/d/atlas_users.html.markdown => docs/data-sources/atlas_users.md (100%) rename website/docs/d/auditing.html.markdown => docs/data-sources/auditing.md (100%) rename website/docs/d/backup_compliance_policy.html.markdown => docs/data-sources/backup_compliance_policy.md (100%) rename website/docs/d/cloud_backup_schedule.html.markdown => docs/data-sources/cloud_backup_schedule.md (100%) rename website/docs/d/cloud_backup_snapshot.html.markdown => docs/data-sources/cloud_backup_snapshot.md (100%) rename website/docs/d/cloud_backup_snapshot_export_bucket.html.markdown => docs/data-sources/cloud_backup_snapshot_export_bucket.md (100%) rename website/docs/d/cloud_backup_snapshot_export_buckets.html.markdown => docs/data-sources/cloud_backup_snapshot_export_buckets.md (100%) rename website/docs/d/cloud_backup_snapshot_export_job.html.markdown => docs/data-sources/cloud_backup_snapshot_export_job.md (100%) rename 
website/docs/d/cloud_backup_snapshot_export_jobs.html.markdown => docs/data-sources/cloud_backup_snapshot_export_jobs.md (100%) rename website/docs/d/cloud_backup_snapshot_restore_job.html.markdown => docs/data-sources/cloud_backup_snapshot_restore_job.md (100%) rename website/docs/d/cloud_backup_snapshot_restore_jobs.html.markdown => docs/data-sources/cloud_backup_snapshot_restore_jobs.md (100%) rename website/docs/d/cloud_backup_snapshots.html.markdown => docs/data-sources/cloud_backup_snapshots.md (100%) rename website/docs/d/cloud_provider_access_setup.markdown => docs/data-sources/cloud_provider_access_setup.md (100%) rename website/docs/d/cloud_provider_shared_tier_restore_job.html.markdown => docs/data-sources/cloud_provider_shared_tier_restore_job.md (100%) rename website/docs/d/cloud_provider_shared_tier_restore_jobs.html.markdown => docs/data-sources/cloud_provider_shared_tier_restore_jobs.md (100%) rename website/docs/d/cloud_provider_shared_tier_snapshot.html.markdown => docs/data-sources/cloud_provider_shared_tier_snapshot.md (100%) rename website/docs/d/cloud_provider_shared_tier_snapshots.html.markdown => docs/data-sources/cloud_provider_shared_tier_snapshots.md (100%) rename website/docs/d/cloud_provider_snapshot.html.markdown => docs/data-sources/cloud_provider_snapshot.md (100%) rename website/docs/d/cloud_provider_snapshot_backup_policy.html.markdown => docs/data-sources/cloud_provider_snapshot_backup_policy.md (100%) rename website/docs/d/cloud_provider_snapshot_restore_job.html.markdown => docs/data-sources/cloud_provider_snapshot_restore_job.md (100%) rename website/docs/d/cloud_provider_snapshot_restore_jobs.html.markdown => docs/data-sources/cloud_provider_snapshot_restore_jobs.md (100%) rename website/docs/d/cloud_provider_snapshots.html.markdown => docs/data-sources/cloud_provider_snapshots.md (100%) rename website/docs/d/cluster.html.markdown => docs/data-sources/cluster.md (100%) rename 
website/docs/d/cluster_outage_simulation.html.markdown => docs/data-sources/cluster_outage_simulation.md (100%) rename website/docs/d/clusters.html.markdown => docs/data-sources/clusters.md (100%) rename website/docs/d/control_plane_ip_addresses.html.markdown => docs/data-sources/control_plane_ip_addresses.md (95%) rename website/docs/d/custom_db_role.html.markdown => docs/data-sources/custom_db_role.md (100%) rename website/docs/d/custom_db_roles.html.markdown => docs/data-sources/custom_db_roles.md (100%) rename website/docs/d/custom_dns_configuration_cluster_aws.html.markdown => docs/data-sources/custom_dns_configuration_cluster_aws.md (100%) rename website/docs/d/data_lake_pipeline.html.markdown => docs/data-sources/data_lake_pipeline.md (100%) rename website/docs/d/data_lake_pipeline_run.html.markdown => docs/data-sources/data_lake_pipeline_run.md (100%) rename website/docs/d/data_lake_pipeline_runs.html.markdown => docs/data-sources/data_lake_pipeline_runs.md (100%) rename website/docs/d/data_lake_pipelines.html.markdown => docs/data-sources/data_lake_pipelines.md (100%) rename website/docs/d/database_user.html.markdown => docs/data-sources/database_user.md (100%) rename website/docs/d/database_users.html.markdown => docs/data-sources/database_users.md (100%) rename website/docs/d/event_trigger.html.markdown => docs/data-sources/event_trigger.md (100%) rename website/docs/d/event_triggers.html.markdown => docs/data-sources/event_triggers.md (100%) rename website/docs/d/federated_database_instance.html.markdown => docs/data-sources/federated_database_instance.md (100%) rename website/docs/d/federated_database_instances.html.markdown => docs/data-sources/federated_database_instances.md (100%) rename website/docs/d/federated_query_limit.html.markdown => docs/data-sources/federated_query_limit.md (100%) rename website/docs/d/federated_query_limits.html.markdown => docs/data-sources/federated_query_limits.md (100%) rename 
website/docs/d/federated_settings.html.markdown => docs/data-sources/federated_settings.md (100%) rename website/docs/d/federated_settings_identity_provider.html.markdown => docs/data-sources/federated_settings_identity_provider.md (100%) rename website/docs/d/federated_settings_identity_providers.html.markdown => docs/data-sources/federated_settings_identity_providers.md (100%) rename website/docs/d/federated_settings_org_config.html.markdown => docs/data-sources/federated_settings_org_config.md (100%) rename website/docs/d/federated_settings_org_configs.html.markdown => docs/data-sources/federated_settings_org_configs.md (100%) rename website/docs/d/federated_settings_org_role_mapping.html.markdown => docs/data-sources/federated_settings_org_role_mapping.md (100%) rename website/docs/d/federated_settings_org_role_mappings.html.markdown => docs/data-sources/federated_settings_org_role_mappings.md (100%) rename website/docs/d/global_cluster_config.html.markdown => docs/data-sources/global_cluster_config.md (100%) rename website/docs/d/ldap_configuration.html.markdown => docs/data-sources/ldap_configuration.md (100%) rename website/docs/d/ldap_verify.html.markdown => docs/data-sources/ldap_verify.md (100%) rename website/docs/d/maintenance_window.html.markdown => docs/data-sources/maintenance_window.md (100%) rename website/docs/d/network_container.html.markdown => docs/data-sources/network_container.md (100%) rename website/docs/d/network_containers.html.markdown => docs/data-sources/network_containers.md (100%) rename website/docs/d/network_peering.html.markdown => docs/data-sources/network_peering.md (100%) rename website/docs/d/network_peerings.html.markdown => docs/data-sources/network_peerings.md (100%) rename website/docs/d/online_archive.html.markdown => docs/data-sources/online_archive.md (100%) rename website/docs/d/online_archives.html.markdown => docs/data-sources/online_archives.md (100%) rename website/docs/d/org_invitation.html.markdown => 
docs/data-sources/org_invitation.md (100%) rename website/docs/d/organization.html.markdown => docs/data-sources/organization.md (100%) rename website/docs/d/organizations.html.markdown => docs/data-sources/organizations.md (100%) rename website/docs/d/private_endpoint_regional_mode.html.markdown => docs/data-sources/private_endpoint_regional_mode.md (100%) rename website/docs/d/privatelink_endpoint.html.markdown => docs/data-sources/privatelink_endpoint.md (100%) rename website/docs/d/privatelink_endpoint_service.html.markdown => docs/data-sources/privatelink_endpoint_service.md (100%) rename website/docs/d/privatelink_endpoint_service_data_federation_online_archive.html.markdown => docs/data-sources/privatelink_endpoint_service_data_federation_online_archive.md (100%) rename website/docs/d/privatelink_endpoint_service_data_federation_online_archives.html.markdown => docs/data-sources/privatelink_endpoint_service_data_federation_online_archives.md (100%) rename website/docs/d/privatelink_endpoint_service_serverless.html.markdown => docs/data-sources/privatelink_endpoint_service_serverless.md (100%) rename website/docs/d/privatelink_endpoints_service_adl.html.markdown => docs/data-sources/privatelink_endpoints_service_adl.md (100%) rename website/docs/d/privatelink_endpoints_service_serverless.html.markdown => docs/data-sources/privatelink_endpoints_service_serverless.md (100%) rename website/docs/d/project.html.markdown => docs/data-sources/project.md (100%) rename website/docs/d/project_api_key.html.markdown => docs/data-sources/project_api_key.md (100%) rename website/docs/d/project_api_keys.html.markdown => docs/data-sources/project_api_keys.md (100%) rename website/docs/d/project_invitation.html.markdown => docs/data-sources/project_invitation.md (100%) rename website/docs/d/project_ip_access_list.html.markdown => docs/data-sources/project_ip_access_list.md (100%) rename website/docs/d/projects.html.markdown => docs/data-sources/projects.md (100%) rename 
website/docs/d/push_based_log_export.html.markdown => docs/data-sources/push_based_log_export.md (96%) rename website/docs/d/roles_org_id.html.markdown => docs/data-sources/roles_org_id.md (100%) rename website/docs/d/search_deployment.html.markdown => docs/data-sources/search_deployment.md (100%) rename website/docs/d/search_index.html.markdown => docs/data-sources/search_index.md (100%) rename website/docs/d/search_indexes.html.markdown => docs/data-sources/search_indexes.md (100%) rename website/docs/d/serverless_instance.html.markdown => docs/data-sources/serverless_instance.md (100%) rename website/docs/d/serverless_instances.html.markdown => docs/data-sources/serverless_instances.md (100%) rename website/docs/d/stream_connection.html.markdown => docs/data-sources/stream_connection.md (100%) rename website/docs/d/stream_connections.html.markdown => docs/data-sources/stream_connections.md (100%) rename website/docs/d/stream_instance.html.markdown => docs/data-sources/stream_instance.md (100%) rename website/docs/d/stream_instances.html.markdown => docs/data-sources/stream_instances.md (100%) rename website/docs/d/team.html.markdown => docs/data-sources/team.md (100%) rename website/docs/d/teams.html.markdown => docs/data-sources/teams.md (100%) rename website/docs/d/third_party_integration.markdown => docs/data-sources/third_party_integration.md (100%) rename website/docs/d/third_party_integrations.markdown => docs/data-sources/third_party_integrations.md (100%) rename website/docs/d/x509_authentication_database_user.html.markdown => docs/data-sources/x509_authentication_database_user.md (100%) rename website/docs/guides/0.6.0-upgrade-guide.html.markdown => docs/guides/0.6.0-upgrade-guide.md (100%) rename website/docs/guides/0.8.0-upgrade-guide.html.markdown => docs/guides/0.8.0-upgrade-guide.md (100%) rename website/docs/guides/0.8.2-upgrade-guide.html.markdown => docs/guides/0.8.2-upgrade-guide.md (100%) rename 
website/docs/guides/0.9.0-upgrade-guide.html.markdown => docs/guides/0.9.0-upgrade-guide.md (100%) rename website/docs/guides/0.9.1-upgrade-guide.html.markdown => docs/guides/0.9.1-upgrade-guide.md (100%) rename website/docs/guides/1.0.0-upgrade-guide.html.markdown => docs/guides/1.0.0-upgrade-guide.md (100%) rename website/docs/guides/1.0.1-upgrade-guide.html.markdown => docs/guides/1.0.1-upgrade-guide.md (100%) rename website/docs/guides/1.1.0-upgrade-guide.html.markdown => docs/guides/1.1.0-upgrade-guide.md (100%) rename website/docs/guides/1.10.0-upgrade-guide.html.markdown => docs/guides/1.10.0-upgrade-guide.md (100%) rename website/docs/guides/1.11.0-upgrade-guide.html.markdown => docs/guides/1.11.0-upgrade-guide.md (100%) rename website/docs/guides/1.12.0-upgrade-guide.html.markdown => docs/guides/1.12.0-upgrade-guide.md (100%) rename website/docs/guides/1.13.0-upgrade-guide.html.markdown => docs/guides/1.13.0-upgrade-guide.md (100%) rename website/docs/guides/1.14.0-upgrade-guide.html.markdown => docs/guides/1.14.0-upgrade-guide.md (100%) rename website/docs/guides/1.15.0-upgrade-guide.html.markdown => docs/guides/1.15.0-upgrade-guide.md (100%) rename website/docs/guides/1.16.0-upgrade-guide.html.markdown => docs/guides/1.16.0-upgrade-guide.md (100%) rename website/docs/guides/1.17.0-upgrade-guide.html.markdown => docs/guides/1.17.0-upgrade-guide.md (100%) rename website/docs/guides/1.2.0-upgrade-guide.html.markdown => docs/guides/1.2.0-upgrade-guide.md (100%) rename website/docs/guides/1.3.0-upgrade-guide.html.markdown => docs/guides/1.3.0-upgrade-guide.md (100%) rename website/docs/guides/1.4.0-upgrade-guide.html.markdown => docs/guides/1.4.0-upgrade-guide.md (100%) rename website/docs/guides/1.5.0-upgrade-guide.html.markdown => docs/guides/1.5.0-upgrade-guide.md (100%) rename website/docs/guides/1.6.0-upgrade-guide.html.markdown => docs/guides/1.6.0-upgrade-guide.md (100%) rename website/docs/guides/1.7.0-upgrade-guide.html.markdown => 
docs/guides/1.7.0-upgrade-guide.md (92%) rename website/docs/guides/1.8.0-upgrade-guide.html.markdown => docs/guides/1.8.0-upgrade-guide.md (100%) rename website/docs/guides/1.9.0-upgrade-guide.html.markdown => docs/guides/1.9.0-upgrade-guide.md (100%) rename website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown => docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md (100%) rename website/docs/index.html.markdown => docs/index.md (100%) rename website/docs/r/access_list_api_key.html.markdown => docs/resources/access_list_api_key.md (100%) rename website/docs/r/advanced_cluster.html.markdown => docs/resources/advanced_cluster.md (100%) rename website/docs/r/alert_configuration.html.markdown => docs/resources/alert_configuration.md (100%) rename website/docs/r/api_key.html.markdown => docs/resources/api_key.md (100%) rename website/docs/r/auditing.html.markdown => docs/resources/auditing.md (100%) rename website/docs/r/backup_compliance_policy.html.markdown => docs/resources/backup_compliance_policy.md (100%) rename website/docs/r/cloud_backup_schedule.html.markdown => docs/resources/cloud_backup_schedule.md (100%) rename website/docs/r/cloud_backup_snapshot.html.markdown => docs/resources/cloud_backup_snapshot.md (100%) rename website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown => docs/resources/cloud_backup_snapshot_export_bucket.md (100%) rename website/docs/r/cloud_backup_snapshot_export_job.html.markdown => docs/resources/cloud_backup_snapshot_export_job.md (100%) rename website/docs/r/cloud_backup_snapshot_restore_job.html.markdown => docs/resources/cloud_backup_snapshot_restore_job.md (100%) rename website/docs/r/cloud_provider_access.markdown => docs/resources/cloud_provider_access.md (100%) rename website/docs/r/cloud_provider_snapshot.html.markdown => docs/resources/cloud_provider_snapshot.md (100%) rename website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown => 
docs/resources/cloud_provider_snapshot_backup_policy.md (100%) rename website/docs/r/cloud_provider_snapshot_restore_job.html.markdown => docs/resources/cloud_provider_snapshot_restore_job.md (100%) rename website/docs/r/cluster.html.markdown => docs/resources/cluster.md (100%) rename website/docs/r/cluster_outage_simulation.html.markdown => docs/resources/cluster_outage_simulation.md (100%) rename website/docs/r/custom_db_role.html.markdown => docs/resources/custom_db_role.md (100%) rename website/docs/r/custom_dns_configuration_cluster_aws.markdown => docs/resources/custom_dns_configuration_cluster_aws.md (100%) rename website/docs/r/data_lake_pipeline.html.markdown => docs/resources/data_lake_pipeline.md (100%) rename website/docs/r/database_user.html.markdown => docs/resources/database_user.md (100%) rename website/docs/r/encryption_at_rest.html.markdown => docs/resources/encryption_at_rest.md (100%) rename website/docs/r/event_trigger.html.markdown => docs/resources/event_trigger.md (100%) rename website/docs/r/federated_database_instance.html.markdown => docs/resources/federated_database_instance.md (100%) rename website/docs/r/federated_query_limit.html.markdown => docs/resources/federated_query_limit.md (100%) rename website/docs/r/federated_settings_identity_provider.html.markdown => docs/resources/federated_settings_identity_provider.md (100%) rename website/docs/r/federated_settings_org_config.html.markdown => docs/resources/federated_settings_org_config.md (100%) rename website/docs/r/federated_settings_org_role_mapping.html.markdown => docs/resources/federated_settings_org_role_mapping.md (100%) rename website/docs/r/global_cluster_config.html.markdown => docs/resources/global_cluster_config.md (100%) rename website/docs/r/ldap_configuration.html.markdown => docs/resources/ldap_configuration.md (93%) rename website/docs/r/ldap_verify.html.markdown => docs/resources/ldap_verify.md (100%) rename website/docs/r/maintenance_window.html.markdown => 
docs/resources/maintenance_window.md (100%) rename website/docs/r/network_container.html.markdown => docs/resources/network_container.md (100%) rename website/docs/r/network_peering.html.markdown => docs/resources/network_peering.md (100%) rename website/docs/r/online_archive.html.markdown => docs/resources/online_archive.md (100%) rename website/docs/r/org_invitation.html.markdown => docs/resources/org_invitation.md (100%) rename website/docs/r/organization.html.markdown => docs/resources/organization.md (100%) rename website/docs/r/private_endpoint_regional_mode.html.markdown => docs/resources/private_endpoint_regional_mode.md (100%) rename website/docs/r/privatelink_endpoint.html.markdown => docs/resources/privatelink_endpoint.md (100%) rename website/docs/r/privatelink_endpoint_serverless.html.markdown => docs/resources/privatelink_endpoint_serverless.md (100%) rename website/docs/r/privatelink_endpoint_service.html.markdown => docs/resources/privatelink_endpoint_service.md (100%) rename website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown => docs/resources/privatelink_endpoint_service_data_federation_online_archive.md (100%) rename website/docs/r/privatelink_endpoint_service_serverless.html.markdown => docs/resources/privatelink_endpoint_service_serverless.md (100%) rename website/docs/r/project.html.markdown => docs/resources/project.md (100%) rename website/docs/r/project_api_key.html.markdown => docs/resources/project_api_key.md (100%) rename website/docs/r/project_invitation.html.markdown => docs/resources/project_invitation.md (100%) rename website/docs/r/project_ip_access_list.html.markdown => docs/resources/project_ip_access_list.md (100%) rename website/docs/r/push_based_log_export.html.markdown => docs/resources/push_based_log_export.md (94%) rename website/docs/r/search_deployment.html.markdown => docs/resources/search_deployment.md (100%) rename website/docs/r/search_index.html.markdown => 
docs/resources/search_index.md (100%) rename website/docs/r/serverless_instance.html.markdown => docs/resources/serverless_instance.md (100%) rename website/docs/r/stream_connection.html.markdown => docs/resources/stream_connection.md (100%) rename website/docs/r/stream_instance.html.markdown => docs/resources/stream_instance.md (100%) rename website/docs/r/team.html.markdown => docs/resources/team.md (100%) rename website/docs/r/teams.html.markdown => docs/resources/teams.md (100%) rename website/docs/r/third_party_integration.markdown => docs/resources/third_party_integration.md (100%) rename website/docs/r/x509_authentication_database_user.html.markdown => docs/resources/x509_authentication_database_user.md (100%) rename website/docs/troubleshooting.html.markdown => docs/troubleshooting.md (100%) diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 44f4adf85a..56888b51ce 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -26,9 +26,3 @@ if [ -n "$STAGED_TF_FILES" ]; then echo "Checking the format of Terraform files" make tflint fi - -STAGED_WEBSITES_FILES=$(git diff --cached --name-only | grep "website/") -if [ -n "$STAGED_WEBSITES_FILES" ]; then - echo "Checking the format of website files" - make website-lint -fi diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6a9258efdb..7690854475 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,6 @@ # Maintained by the MongoDB APIx-Integrations team * @mongodb/APIx-Integrations - # Changelog entries reviewed by Docs Cloud Team /.changelog/ @mongodb/docs-cloud-team -/website/ @mongodb/docs-cloud-team +/docs/ @mongodb/docs-cloud-team diff --git a/.github/ISSUE_TEMPLATE/Bug_Report.md b/.github/ISSUE_TEMPLATE/Bug_Report.md index d165962905..367d472c18 100644 --- a/.github/ISSUE_TEMPLATE/Bug_Report.md +++ b/.github/ISSUE_TEMPLATE/Bug_Report.md @@ -19,7 +19,7 @@ Our support will prioritise issues that contain all the required information tha ### Terraform CLI and Terraform 
MongoDB Atlas Provider Version -Please ensure your issue is reproducible on a supported Terraform version. You may review our [Terraform version compatibility matrix](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#hashicorp-terraform-version-compatibility-matrix) to know more. +Please ensure your issue is reproducible on a supported Terraform version. You may review our [Terraform version compatibility matrix](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/index.md#hashicorp-terraform-version-compatibility-matrix) to know more. diff --git a/website/docs/d/roles_org_id.html.markdown b/docs/data-sources/roles_org_id.md similarity index 100% rename from website/docs/d/roles_org_id.html.markdown rename to docs/data-sources/roles_org_id.md diff --git a/website/docs/d/search_deployment.html.markdown b/docs/data-sources/search_deployment.md similarity index 100% rename from website/docs/d/search_deployment.html.markdown rename to docs/data-sources/search_deployment.md diff --git a/website/docs/d/search_index.html.markdown b/docs/data-sources/search_index.md similarity index 100% rename from website/docs/d/search_index.html.markdown rename to docs/data-sources/search_index.md diff --git a/website/docs/d/search_indexes.html.markdown b/docs/data-sources/search_indexes.md similarity index 100% rename from website/docs/d/search_indexes.html.markdown rename to docs/data-sources/search_indexes.md diff --git a/website/docs/d/serverless_instance.html.markdown b/docs/data-sources/serverless_instance.md similarity index 100% rename from website/docs/d/serverless_instance.html.markdown rename to docs/data-sources/serverless_instance.md diff --git a/website/docs/d/serverless_instances.html.markdown b/docs/data-sources/serverless_instances.md similarity index 100% rename from website/docs/d/serverless_instances.html.markdown rename to docs/data-sources/serverless_instances.md diff --git 
a/website/docs/d/stream_connection.html.markdown b/docs/data-sources/stream_connection.md similarity index 100% rename from website/docs/d/stream_connection.html.markdown rename to docs/data-sources/stream_connection.md diff --git a/website/docs/d/stream_connections.html.markdown b/docs/data-sources/stream_connections.md similarity index 100% rename from website/docs/d/stream_connections.html.markdown rename to docs/data-sources/stream_connections.md diff --git a/website/docs/d/stream_instance.html.markdown b/docs/data-sources/stream_instance.md similarity index 100% rename from website/docs/d/stream_instance.html.markdown rename to docs/data-sources/stream_instance.md diff --git a/website/docs/d/stream_instances.html.markdown b/docs/data-sources/stream_instances.md similarity index 100% rename from website/docs/d/stream_instances.html.markdown rename to docs/data-sources/stream_instances.md diff --git a/website/docs/d/team.html.markdown b/docs/data-sources/team.md similarity index 100% rename from website/docs/d/team.html.markdown rename to docs/data-sources/team.md diff --git a/website/docs/d/teams.html.markdown b/docs/data-sources/teams.md similarity index 100% rename from website/docs/d/teams.html.markdown rename to docs/data-sources/teams.md diff --git a/website/docs/d/third_party_integration.markdown b/docs/data-sources/third_party_integration.md similarity index 100% rename from website/docs/d/third_party_integration.markdown rename to docs/data-sources/third_party_integration.md diff --git a/website/docs/d/third_party_integrations.markdown b/docs/data-sources/third_party_integrations.md similarity index 100% rename from website/docs/d/third_party_integrations.markdown rename to docs/data-sources/third_party_integrations.md diff --git a/website/docs/d/x509_authentication_database_user.html.markdown b/docs/data-sources/x509_authentication_database_user.md similarity index 100% rename from website/docs/d/x509_authentication_database_user.html.markdown rename 
to docs/data-sources/x509_authentication_database_user.md diff --git a/website/docs/guides/0.6.0-upgrade-guide.html.markdown b/docs/guides/0.6.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/0.6.0-upgrade-guide.html.markdown rename to docs/guides/0.6.0-upgrade-guide.md diff --git a/website/docs/guides/0.8.0-upgrade-guide.html.markdown b/docs/guides/0.8.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/0.8.0-upgrade-guide.html.markdown rename to docs/guides/0.8.0-upgrade-guide.md diff --git a/website/docs/guides/0.8.2-upgrade-guide.html.markdown b/docs/guides/0.8.2-upgrade-guide.md similarity index 100% rename from website/docs/guides/0.8.2-upgrade-guide.html.markdown rename to docs/guides/0.8.2-upgrade-guide.md diff --git a/website/docs/guides/0.9.0-upgrade-guide.html.markdown b/docs/guides/0.9.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/0.9.0-upgrade-guide.html.markdown rename to docs/guides/0.9.0-upgrade-guide.md diff --git a/website/docs/guides/0.9.1-upgrade-guide.html.markdown b/docs/guides/0.9.1-upgrade-guide.md similarity index 100% rename from website/docs/guides/0.9.1-upgrade-guide.html.markdown rename to docs/guides/0.9.1-upgrade-guide.md diff --git a/website/docs/guides/1.0.0-upgrade-guide.html.markdown b/docs/guides/1.0.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.0.0-upgrade-guide.html.markdown rename to docs/guides/1.0.0-upgrade-guide.md diff --git a/website/docs/guides/1.0.1-upgrade-guide.html.markdown b/docs/guides/1.0.1-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.0.1-upgrade-guide.html.markdown rename to docs/guides/1.0.1-upgrade-guide.md diff --git a/website/docs/guides/1.1.0-upgrade-guide.html.markdown b/docs/guides/1.1.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.1.0-upgrade-guide.html.markdown rename to docs/guides/1.1.0-upgrade-guide.md diff --git 
a/website/docs/guides/1.10.0-upgrade-guide.html.markdown b/docs/guides/1.10.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.10.0-upgrade-guide.html.markdown rename to docs/guides/1.10.0-upgrade-guide.md diff --git a/website/docs/guides/1.11.0-upgrade-guide.html.markdown b/docs/guides/1.11.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.11.0-upgrade-guide.html.markdown rename to docs/guides/1.11.0-upgrade-guide.md diff --git a/website/docs/guides/1.12.0-upgrade-guide.html.markdown b/docs/guides/1.12.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.12.0-upgrade-guide.html.markdown rename to docs/guides/1.12.0-upgrade-guide.md diff --git a/website/docs/guides/1.13.0-upgrade-guide.html.markdown b/docs/guides/1.13.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.13.0-upgrade-guide.html.markdown rename to docs/guides/1.13.0-upgrade-guide.md diff --git a/website/docs/guides/1.14.0-upgrade-guide.html.markdown b/docs/guides/1.14.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.14.0-upgrade-guide.html.markdown rename to docs/guides/1.14.0-upgrade-guide.md diff --git a/website/docs/guides/1.15.0-upgrade-guide.html.markdown b/docs/guides/1.15.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.15.0-upgrade-guide.html.markdown rename to docs/guides/1.15.0-upgrade-guide.md diff --git a/website/docs/guides/1.16.0-upgrade-guide.html.markdown b/docs/guides/1.16.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.16.0-upgrade-guide.html.markdown rename to docs/guides/1.16.0-upgrade-guide.md diff --git a/website/docs/guides/1.17.0-upgrade-guide.html.markdown b/docs/guides/1.17.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.17.0-upgrade-guide.html.markdown rename to docs/guides/1.17.0-upgrade-guide.md diff --git a/website/docs/guides/1.2.0-upgrade-guide.html.markdown 
b/docs/guides/1.2.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.2.0-upgrade-guide.html.markdown rename to docs/guides/1.2.0-upgrade-guide.md diff --git a/website/docs/guides/1.3.0-upgrade-guide.html.markdown b/docs/guides/1.3.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.3.0-upgrade-guide.html.markdown rename to docs/guides/1.3.0-upgrade-guide.md diff --git a/website/docs/guides/1.4.0-upgrade-guide.html.markdown b/docs/guides/1.4.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.4.0-upgrade-guide.html.markdown rename to docs/guides/1.4.0-upgrade-guide.md diff --git a/website/docs/guides/1.5.0-upgrade-guide.html.markdown b/docs/guides/1.5.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.5.0-upgrade-guide.html.markdown rename to docs/guides/1.5.0-upgrade-guide.md diff --git a/website/docs/guides/1.6.0-upgrade-guide.html.markdown b/docs/guides/1.6.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.6.0-upgrade-guide.html.markdown rename to docs/guides/1.6.0-upgrade-guide.md diff --git a/website/docs/guides/1.7.0-upgrade-guide.html.markdown b/docs/guides/1.7.0-upgrade-guide.md similarity index 92% rename from website/docs/guides/1.7.0-upgrade-guide.html.markdown rename to docs/guides/1.7.0-upgrade-guide.md index f29a56002c..ee9988f593 100644 --- a/website/docs/guides/1.7.0-upgrade-guide.html.markdown +++ b/docs/guides/1.7.0-upgrade-guide.md @@ -8,12 +8,10 @@ subcategory: "Older Guides" The Terraform MongoDB Atlas Provider version 1.7.0 has one main new and exciting feature. 
New Features: -* You can now [`authenticate with AWS Secrets Manager (AWS SM)`](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#aws-secrets-manager) - +* You can now [`authenticate with AWS Secrets Manager (AWS SM)`](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/index.md#aws-secrets-manager) See the [CHANGELOG](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/CHANGELOG.md) for more details. - ### Helpful Links * [Report bugs](https://github.com/mongodb/terraform-provider-mongodbatlas/issues) diff --git a/website/docs/guides/1.8.0-upgrade-guide.html.markdown b/docs/guides/1.8.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.8.0-upgrade-guide.html.markdown rename to docs/guides/1.8.0-upgrade-guide.md diff --git a/website/docs/guides/1.9.0-upgrade-guide.html.markdown b/docs/guides/1.9.0-upgrade-guide.md similarity index 100% rename from website/docs/guides/1.9.0-upgrade-guide.html.markdown rename to docs/guides/1.9.0-upgrade-guide.md diff --git a/website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown b/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md similarity index 100% rename from website/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.html.markdown rename to docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md diff --git a/website/docs/index.html.markdown b/docs/index.md similarity index 100% rename from website/docs/index.html.markdown rename to docs/index.md diff --git a/website/docs/r/access_list_api_key.html.markdown b/docs/resources/access_list_api_key.md similarity index 100% rename from website/docs/r/access_list_api_key.html.markdown rename to docs/resources/access_list_api_key.md diff --git a/website/docs/r/advanced_cluster.html.markdown b/docs/resources/advanced_cluster.md similarity index 100% rename from website/docs/r/advanced_cluster.html.markdown rename to 
docs/resources/advanced_cluster.md diff --git a/website/docs/r/alert_configuration.html.markdown b/docs/resources/alert_configuration.md similarity index 100% rename from website/docs/r/alert_configuration.html.markdown rename to docs/resources/alert_configuration.md diff --git a/website/docs/r/api_key.html.markdown b/docs/resources/api_key.md similarity index 100% rename from website/docs/r/api_key.html.markdown rename to docs/resources/api_key.md diff --git a/website/docs/r/auditing.html.markdown b/docs/resources/auditing.md similarity index 100% rename from website/docs/r/auditing.html.markdown rename to docs/resources/auditing.md diff --git a/website/docs/r/backup_compliance_policy.html.markdown b/docs/resources/backup_compliance_policy.md similarity index 100% rename from website/docs/r/backup_compliance_policy.html.markdown rename to docs/resources/backup_compliance_policy.md diff --git a/website/docs/r/cloud_backup_schedule.html.markdown b/docs/resources/cloud_backup_schedule.md similarity index 100% rename from website/docs/r/cloud_backup_schedule.html.markdown rename to docs/resources/cloud_backup_schedule.md diff --git a/website/docs/r/cloud_backup_snapshot.html.markdown b/docs/resources/cloud_backup_snapshot.md similarity index 100% rename from website/docs/r/cloud_backup_snapshot.html.markdown rename to docs/resources/cloud_backup_snapshot.md diff --git a/website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown b/docs/resources/cloud_backup_snapshot_export_bucket.md similarity index 100% rename from website/docs/r/cloud_backup_snapshot_export_bucket.html.markdown rename to docs/resources/cloud_backup_snapshot_export_bucket.md diff --git a/website/docs/r/cloud_backup_snapshot_export_job.html.markdown b/docs/resources/cloud_backup_snapshot_export_job.md similarity index 100% rename from website/docs/r/cloud_backup_snapshot_export_job.html.markdown rename to docs/resources/cloud_backup_snapshot_export_job.md diff --git 
a/website/docs/r/cloud_backup_snapshot_restore_job.html.markdown b/docs/resources/cloud_backup_snapshot_restore_job.md similarity index 100% rename from website/docs/r/cloud_backup_snapshot_restore_job.html.markdown rename to docs/resources/cloud_backup_snapshot_restore_job.md diff --git a/website/docs/r/cloud_provider_access.markdown b/docs/resources/cloud_provider_access.md similarity index 100% rename from website/docs/r/cloud_provider_access.markdown rename to docs/resources/cloud_provider_access.md diff --git a/website/docs/r/cloud_provider_snapshot.html.markdown b/docs/resources/cloud_provider_snapshot.md similarity index 100% rename from website/docs/r/cloud_provider_snapshot.html.markdown rename to docs/resources/cloud_provider_snapshot.md diff --git a/website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown b/docs/resources/cloud_provider_snapshot_backup_policy.md similarity index 100% rename from website/docs/r/cloud_provider_snapshot_backup_policy.html.markdown rename to docs/resources/cloud_provider_snapshot_backup_policy.md diff --git a/website/docs/r/cloud_provider_snapshot_restore_job.html.markdown b/docs/resources/cloud_provider_snapshot_restore_job.md similarity index 100% rename from website/docs/r/cloud_provider_snapshot_restore_job.html.markdown rename to docs/resources/cloud_provider_snapshot_restore_job.md diff --git a/website/docs/r/cluster.html.markdown b/docs/resources/cluster.md similarity index 100% rename from website/docs/r/cluster.html.markdown rename to docs/resources/cluster.md diff --git a/website/docs/r/cluster_outage_simulation.html.markdown b/docs/resources/cluster_outage_simulation.md similarity index 100% rename from website/docs/r/cluster_outage_simulation.html.markdown rename to docs/resources/cluster_outage_simulation.md diff --git a/website/docs/r/custom_db_role.html.markdown b/docs/resources/custom_db_role.md similarity index 100% rename from website/docs/r/custom_db_role.html.markdown rename to 
docs/resources/custom_db_role.md diff --git a/website/docs/r/custom_dns_configuration_cluster_aws.markdown b/docs/resources/custom_dns_configuration_cluster_aws.md similarity index 100% rename from website/docs/r/custom_dns_configuration_cluster_aws.markdown rename to docs/resources/custom_dns_configuration_cluster_aws.md diff --git a/website/docs/r/data_lake_pipeline.html.markdown b/docs/resources/data_lake_pipeline.md similarity index 100% rename from website/docs/r/data_lake_pipeline.html.markdown rename to docs/resources/data_lake_pipeline.md diff --git a/website/docs/r/database_user.html.markdown b/docs/resources/database_user.md similarity index 100% rename from website/docs/r/database_user.html.markdown rename to docs/resources/database_user.md diff --git a/website/docs/r/encryption_at_rest.html.markdown b/docs/resources/encryption_at_rest.md similarity index 100% rename from website/docs/r/encryption_at_rest.html.markdown rename to docs/resources/encryption_at_rest.md diff --git a/website/docs/r/event_trigger.html.markdown b/docs/resources/event_trigger.md similarity index 100% rename from website/docs/r/event_trigger.html.markdown rename to docs/resources/event_trigger.md diff --git a/website/docs/r/federated_database_instance.html.markdown b/docs/resources/federated_database_instance.md similarity index 100% rename from website/docs/r/federated_database_instance.html.markdown rename to docs/resources/federated_database_instance.md diff --git a/website/docs/r/federated_query_limit.html.markdown b/docs/resources/federated_query_limit.md similarity index 100% rename from website/docs/r/federated_query_limit.html.markdown rename to docs/resources/federated_query_limit.md diff --git a/website/docs/r/federated_settings_identity_provider.html.markdown b/docs/resources/federated_settings_identity_provider.md similarity index 100% rename from website/docs/r/federated_settings_identity_provider.html.markdown rename to 
docs/resources/federated_settings_identity_provider.md diff --git a/website/docs/r/federated_settings_org_config.html.markdown b/docs/resources/federated_settings_org_config.md similarity index 100% rename from website/docs/r/federated_settings_org_config.html.markdown rename to docs/resources/federated_settings_org_config.md diff --git a/website/docs/r/federated_settings_org_role_mapping.html.markdown b/docs/resources/federated_settings_org_role_mapping.md similarity index 100% rename from website/docs/r/federated_settings_org_role_mapping.html.markdown rename to docs/resources/federated_settings_org_role_mapping.md diff --git a/website/docs/r/global_cluster_config.html.markdown b/docs/resources/global_cluster_config.md similarity index 100% rename from website/docs/r/global_cluster_config.html.markdown rename to docs/resources/global_cluster_config.md diff --git a/website/docs/r/ldap_configuration.html.markdown b/docs/resources/ldap_configuration.md similarity index 93% rename from website/docs/r/ldap_configuration.html.markdown rename to docs/resources/ldap_configuration.md index f0d6d527df..a6d2642801 100644 --- a/website/docs/r/ldap_configuration.html.markdown +++ b/docs/resources/ldap_configuration.md @@ -1,6 +1,6 @@ # Resource: mongodbatlas_ldap_configuration -`mongodbatlas_ldap_configuration` provides an LDAP Configuration resource. This allows an LDAP configuration for an Atlas project to be crated and managed. This endpoint doesn’t verify connectivity using the provided LDAP over TLS configuration details. To verify a configuration before saving it, use the resource to [verify](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/INTMDB-114/website/docs/r/ldap_verify.html.markdown) the LDAP configuration. +`mongodbatlas_ldap_configuration` provides an LDAP Configuration resource. This allows an LDAP configuration for an Atlas project to be created and managed. 
This endpoint doesn’t verify connectivity using the provided LDAP over TLS configuration details. To verify a configuration before saving it, use the resource to [verify](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/resources/ldap_verify.md) the LDAP configuration. ## Example Usage diff --git a/website/docs/r/ldap_verify.html.markdown b/docs/resources/ldap_verify.md similarity index 100% rename from website/docs/r/ldap_verify.html.markdown rename to docs/resources/ldap_verify.md diff --git a/website/docs/r/maintenance_window.html.markdown b/docs/resources/maintenance_window.md similarity index 100% rename from website/docs/r/maintenance_window.html.markdown rename to docs/resources/maintenance_window.md diff --git a/website/docs/r/network_container.html.markdown b/docs/resources/network_container.md similarity index 100% rename from website/docs/r/network_container.html.markdown rename to docs/resources/network_container.md diff --git a/website/docs/r/network_peering.html.markdown b/docs/resources/network_peering.md similarity index 100% rename from website/docs/r/network_peering.html.markdown rename to docs/resources/network_peering.md diff --git a/website/docs/r/online_archive.html.markdown b/docs/resources/online_archive.md similarity index 100% rename from website/docs/r/online_archive.html.markdown rename to docs/resources/online_archive.md diff --git a/website/docs/r/org_invitation.html.markdown b/docs/resources/org_invitation.md similarity index 100% rename from website/docs/r/org_invitation.html.markdown rename to docs/resources/org_invitation.md diff --git a/website/docs/r/organization.html.markdown b/docs/resources/organization.md similarity index 100% rename from website/docs/r/organization.html.markdown rename to docs/resources/organization.md diff --git a/website/docs/r/private_endpoint_regional_mode.html.markdown b/docs/resources/private_endpoint_regional_mode.md similarity index 100% rename from 
website/docs/r/private_endpoint_regional_mode.html.markdown rename to docs/resources/private_endpoint_regional_mode.md diff --git a/website/docs/r/privatelink_endpoint.html.markdown b/docs/resources/privatelink_endpoint.md similarity index 100% rename from website/docs/r/privatelink_endpoint.html.markdown rename to docs/resources/privatelink_endpoint.md diff --git a/website/docs/r/privatelink_endpoint_serverless.html.markdown b/docs/resources/privatelink_endpoint_serverless.md similarity index 100% rename from website/docs/r/privatelink_endpoint_serverless.html.markdown rename to docs/resources/privatelink_endpoint_serverless.md diff --git a/website/docs/r/privatelink_endpoint_service.html.markdown b/docs/resources/privatelink_endpoint_service.md similarity index 100% rename from website/docs/r/privatelink_endpoint_service.html.markdown rename to docs/resources/privatelink_endpoint_service.md diff --git a/website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown b/docs/resources/privatelink_endpoint_service_data_federation_online_archive.md similarity index 100% rename from website/docs/r/privatelink_endpoint_service_data_federation_online_archive.html.markdown rename to docs/resources/privatelink_endpoint_service_data_federation_online_archive.md diff --git a/website/docs/r/privatelink_endpoint_service_serverless.html.markdown b/docs/resources/privatelink_endpoint_service_serverless.md similarity index 100% rename from website/docs/r/privatelink_endpoint_service_serverless.html.markdown rename to docs/resources/privatelink_endpoint_service_serverless.md diff --git a/website/docs/r/project.html.markdown b/docs/resources/project.md similarity index 100% rename from website/docs/r/project.html.markdown rename to docs/resources/project.md diff --git a/website/docs/r/project_api_key.html.markdown b/docs/resources/project_api_key.md similarity index 100% rename from website/docs/r/project_api_key.html.markdown rename to 
docs/resources/project_api_key.md diff --git a/website/docs/r/project_invitation.html.markdown b/docs/resources/project_invitation.md similarity index 100% rename from website/docs/r/project_invitation.html.markdown rename to docs/resources/project_invitation.md diff --git a/website/docs/r/project_ip_access_list.html.markdown b/docs/resources/project_ip_access_list.md similarity index 100% rename from website/docs/r/project_ip_access_list.html.markdown rename to docs/resources/project_ip_access_list.md diff --git a/website/docs/r/push_based_log_export.html.markdown b/docs/resources/push_based_log_export.md similarity index 94% rename from website/docs/r/push_based_log_export.html.markdown rename to docs/resources/push_based_log_export.md index 591a0b1d34..5c2f5cb41a 100644 --- a/website/docs/r/push_based_log_export.html.markdown +++ b/docs/resources/push_based_log_export.md @@ -34,6 +34,14 @@ resource "mongodbatlas_push_based_log_export" "test" { iam_role_id = mongodbatlas_cloud_provider_access_authorization.auth_role.role_id prefix_path = "push-based-log-test" } + +data "mongodbatlas_push_based_log_export" "test" { + project_id = mongodbatlas_push_based_log_export.test.project_id +} + +output "test" { + value = data.mongodbatlas_push_based_log_export.test.prefix_path +} ``` @@ -43,12 +51,13 @@ resource "mongodbatlas_push_based_log_export" "test" { - `bucket_name` (String) The name of the bucket to which the agent sends the logs to. - `iam_role_id` (String) ID of the AWS IAM role that is used to write to the S3 bucket. -- `prefix_path` (String) S3 directory in which vector writes in order to store the logs. An empty string denotes the root directory. - `project_id` (String) Unique 24-hexadecimal digit string that identifies your project. Use the [/groups](#tag/Projects/operation/listProjects) endpoint to retrieve all projects to which the authenticated user has access. **NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. 
For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups. ### Optional + +- `prefix_path` (String) S3 directory in which vector writes in order to store the logs. An empty string denotes the root directory. - `timeouts` (Attributes) (see [below for nested schema](#nestedatt--timeouts)) ### Read-Only diff --git a/website/docs/r/search_deployment.html.markdown b/docs/resources/search_deployment.md similarity index 100% rename from website/docs/r/search_deployment.html.markdown rename to docs/resources/search_deployment.md diff --git a/website/docs/r/search_index.html.markdown b/docs/resources/search_index.md similarity index 100% rename from website/docs/r/search_index.html.markdown rename to docs/resources/search_index.md diff --git a/website/docs/r/serverless_instance.html.markdown b/docs/resources/serverless_instance.md similarity index 100% rename from website/docs/r/serverless_instance.html.markdown rename to docs/resources/serverless_instance.md diff --git a/website/docs/r/stream_connection.html.markdown b/docs/resources/stream_connection.md similarity index 100% rename from website/docs/r/stream_connection.html.markdown rename to docs/resources/stream_connection.md diff --git a/website/docs/r/stream_instance.html.markdown b/docs/resources/stream_instance.md similarity index 100% rename from website/docs/r/stream_instance.html.markdown rename to docs/resources/stream_instance.md diff --git a/website/docs/r/team.html.markdown b/docs/resources/team.md similarity index 100% rename from website/docs/r/team.html.markdown rename to docs/resources/team.md diff --git a/website/docs/r/teams.html.markdown b/docs/resources/teams.md similarity index 100% rename from website/docs/r/teams.html.markdown rename to docs/resources/teams.md diff --git a/website/docs/r/third_party_integration.markdown b/docs/resources/third_party_integration.md similarity index 100% rename from 
website/docs/r/third_party_integration.markdown rename to docs/resources/third_party_integration.md diff --git a/website/docs/r/x509_authentication_database_user.html.markdown b/docs/resources/x509_authentication_database_user.md similarity index 100% rename from website/docs/r/x509_authentication_database_user.html.markdown rename to docs/resources/x509_authentication_database_user.md diff --git a/website/docs/troubleshooting.html.markdown b/docs/troubleshooting.md similarity index 100% rename from website/docs/troubleshooting.html.markdown rename to docs/troubleshooting.md diff --git a/examples/mongodbatlas_advanced_cluster/global-cluster/README.md b/examples/mongodbatlas_advanced_cluster/global-cluster/README.md index 71b82915f2..428821a1fe 100644 --- a/examples/mongodbatlas_advanced_cluster/global-cluster/README.md +++ b/examples/mongodbatlas_advanced_cluster/global-cluster/README.md @@ -30,7 +30,7 @@ private_key = "" atlas_org_id = "" ``` -... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#aws-secrets-manager) +... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/index.md#aws-secrets-manager) **2\. Review the Terraform plan.** diff --git a/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md b/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md index 4b97956416..4e356cb8c9 100644 --- a/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md +++ b/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md @@ -30,7 +30,7 @@ private_key = "" atlas_org_id = "" ``` -... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#aws-secrets-manager) +... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/index.md#aws-secrets-manager) **2\. 
Review the Terraform plan.** diff --git a/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md b/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md index b80cd6e205..01b3fc0988 100644 --- a/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md +++ b/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md @@ -32,7 +32,7 @@ private_key = "22b722a9-34f4-3b1b-aada-298329a5c128" atlas_org_id = "63f4d4a47baeac59406dc131" ``` -... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#aws-secrets-manager) +... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/index.md#aws-secrets-manager) **2\. Set your AWS access key & secret via environment variables: diff --git a/examples/mongodbatlas_encryption_at_rest/aws/multi-region-cluster/README.MD b/examples/mongodbatlas_encryption_at_rest/aws/multi-region-cluster/README.MD index 399938e967..88771e727a 100644 --- a/examples/mongodbatlas_encryption_at_rest/aws/multi-region-cluster/README.MD +++ b/examples/mongodbatlas_encryption_at_rest/aws/multi-region-cluster/README.MD @@ -32,7 +32,7 @@ private_key = "22b722a9-34f4-3b1b-aada-298329a5c128" atlas_org_id = "63f4d4a47baeac59406dc131" ``` -... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#aws-secrets-manager) +... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/docs/index.md#aws-secrets-manager) **2\. Set your AWS access key & secret via environment variables: diff --git a/scripts/check-upgrade-guide-exists.sh b/scripts/check-upgrade-guide-exists.sh index 70c94c82ac..d1cdf7f136 100755 --- a/scripts/check-upgrade-guide-exists.sh +++ b/scripts/check-upgrade-guide-exists.sh @@ -10,7 +10,7 @@ IFS='.' 
read -r MAJOR MINOR PATCH <<< "$RELEASE_NUMBER" # Check if it's a major release (patch version is 0) if [ "$PATCH" -eq 0 ]; then - UPGRADE_GUIDE_PATH="website/docs/guides/$MAJOR.$MINOR.$PATCH-upgrade-guide.html.markdown" + UPGRADE_GUIDE_PATH="docs/guides/$MAJOR.$MINOR.$PATCH-upgrade-guide.md" echo "Checking for the presence of $UPGRADE_GUIDE_PATH" if [ ! -f "$UPGRADE_GUIDE_PATH" ]; then echo "Stopping release process, upgrade guide $UPGRADE_GUIDE_PATH does not exist. Please visit our docs for more details: https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/RELEASING.md" diff --git a/scripts/generate-doc.sh b/scripts/generate-doc.sh index 9adf5616dd..8e2973e6af 100755 --- a/scripts/generate-doc.sh +++ b/scripts/generate-doc.sh @@ -32,7 +32,7 @@ set -euo pipefail -TF_VERSION="${TF_VERSION:-"1.7"}" # TF version to use when running tfplugindocs. Default: 1.7 +TF_VERSION="${TF_VERSION:-"1.9.2"}" # TF version to use when running tfplugindocs. Default: 1.9.2 TEMPLATE_FOLDER_PATH="${TEMPLATE_FOLDER_PATH:-"templates"}" # PATH to the templates folder. Default: templates @@ -67,39 +67,35 @@ if [ ! -f "${TEMPLATE_FOLDER_PATH}/data-sources/${resource_name}s.md.tmpl" ]; th printf "Skipping this check: We assume that the resource does not have a plural data source.\n\n" fi -# tfplugindocs uses this folder to generate the documentations -mkdir -p docs +tfplugindocs generate --tf-version "${TF_VERSION}" --website-source-dir "${TEMPLATE_FOLDER_PATH}" --rendered-website-dir "docs-out" -tfplugindocs generate --tf-version "${TF_VERSION}" --website-source-dir "${TEMPLATE_FOLDER_PATH}" - -if [ ! -f "docs/resources/${resource_name}.md" ]; then +if [ ! 
-f "docs-out/resources/${resource_name}.md" ]; then echo "Error: We cannot find the documentation file for the resource ${resource_name}.md" echo "Please, make sure to include the resource template under templates/resources/${resource_name}.md.tmpl" printf "Skipping this step: We assume that only a data source is being generated.\n\n" else - printf "\nMoving the generated file %s.md to the website folder" "${resource_name}" - mv "docs/resources/${resource_name}.md" "website/docs/r/${resource_name}.html.markdown" + printf "Moving the generated resource file %s.md to the website folder.\n" "${resource_name}" + mv "docs-out/resources/${resource_name}.md" "docs/resources/${resource_name}.md" fi -if [ ! -f "docs/data-sources/${resource_name}.md" ]; then +if [ ! -f "docs-out/data-sources/${resource_name}.md" ]; then echo "Error: We cannot find the documentation file for the data source ${resource_name}.md" echo "Please, make sure to include the data source template under templates/data-sources/${resource_name}.md.tmpl" exit 1 else - printf "\nMoving the generated file %s.md to the website folder" "${resource_name}" - mv "docs/data-sources/${resource_name}.md" "website/docs/d/${resource_name}.html.markdown" + printf "Moving the generated data-source file %s.md to the website folder.\n" "${resource_name}" + mv "docs-out/data-sources/${resource_name}.md" "docs/data-sources/${resource_name}.md" fi -if [ ! -f "docs/data-sources/${resource_name}s.md" ]; then - echo "Warning: We cannot find the documentation file for the data source ${resource_name}s.md." +if [ ! -f "docs-out/data-sources/${resource_name}s.md" ]; then + echo "Warning: We cannot find the documentation file for the plural data source ${resource_name}s.md." 
echo "Please, make sure to include the data source template under templates/data-sources/${resource_name}s.md.tmpl" printf "Skipping this step: We assume that the resource does not have a plural data source.\n\n" else - printf "\nMoving the generated file %s.md to the website folder" "${resource_name}s" - mv "docs/data-sources/${resource_name}s.md" "website/docs/d/${resource_name}s.html.markdown" + printf "\nMoving the generated plural data-source file %s.md to the website folder.\n" "${resource_name}s" + mv "docs-out/data-sources/${resource_name}s.md" "docs/data-sources/${resource_name}s.md" fi -# Delete the docs/ folder -rm -R docs/ +rm -R docs-out/ printf "\nThe documentation for %s has been created.\n" "${resource_name}" diff --git a/scripts/update-examples-reference-in-docs.sh b/scripts/update-examples-reference-in-docs.sh index 2df63f2e12..90ff16ba54 100755 --- a/scripts/update-examples-reference-in-docs.sh +++ b/scripts/update-examples-reference-in-docs.sh @@ -4,7 +4,7 @@ set -euo pipefail : "${1?"Tag of new release must be provided"}" -FILE_PATH="./website/docs/index.html.markdown" +FILE_PATH="./docs/index.md" RELEASE_TAG=$1 # Define the old URL pattern and new URL diff --git a/scripts/update-tf-compatibility-matrix.sh b/scripts/update-tf-compatibility-matrix.sh index 9d9b172a66..8e1925d6a3 100755 --- a/scripts/update-tf-compatibility-matrix.sh +++ b/scripts/update-tf-compatibility-matrix.sh @@ -17,7 +17,7 @@ set -euo pipefail input_array=$(./scripts/get-terraform-supported-versions.sh "true") -indexFile="website/docs/index.html.markdown" +indexFile="docs/index.md" transform_array() { local arr="$1" diff --git a/templates/data-source.md.tmpl b/templates/data-source.md.tmpl index 15038928c2..32b76776d1 100644 --- a/templates/data-source.md.tmpl +++ b/templates/data-source.md.tmpl @@ -1,7 +1,5 @@ # {{ if .Name }}{{.Type}}: {{.Name}}{{ end }} -{{ if .Description }} {{ .Description | trimspace }} {{ end }} - ## Example Usages {{ if .Name }} {{ if eq .Name 
"mongodbatlas_network_peering" }} diff --git a/templates/data-sources/control_plane_ip_addresses.md.tmpl b/templates/data-sources/control_plane_ip_addresses.md.tmpl index c1e6d6dc51..32993054eb 100644 --- a/templates/data-sources/control_plane_ip_addresses.md.tmpl +++ b/templates/data-sources/control_plane_ip_addresses.md.tmpl @@ -1,7 +1,6 @@ # {{.Type}}: {{.Name}} -{{ .Description | trimspace }} -Provides a data source that returns all control plane IP addresses. +`{{.Name}}` returns all control plane IP addresses. ## Example Usages {{ tffile (printf "examples/%s/main.tf" .Name )}} diff --git a/templates/data-sources/push_based_log_export.md.tmpl b/templates/data-sources/push_based_log_export.md.tmpl index 03255fcd68..0c25f4821c 100644 --- a/templates/data-sources/push_based_log_export.md.tmpl +++ b/templates/data-sources/push_based_log_export.md.tmpl @@ -1,7 +1,6 @@ # {{.Type}}: {{.Name}} -{{ .Description | trimspace }} -`mongodbatlas_push_based_log_export` describes the configured project level settings for the push-based log export feature. +`{{.Name}}` describes the configured project level settings for the push-based log export feature. ## Example Usages {{ tffile (printf "examples/%s/main.tf" .Name )}} diff --git a/templates/data-sources/search_deployment.md.tmpl b/templates/data-sources/search_deployment.md.tmpl index b20f0829e6..b746ea483e 100644 --- a/templates/data-sources/search_deployment.md.tmpl +++ b/templates/data-sources/search_deployment.md.tmpl @@ -1,7 +1,6 @@ # {{.Type}}: {{.Name}} -{{ .Description | trimspace }} -`mongodbatlas_search_deployment` describes a search node deployment. +`{{.Name}}` describes a search node deployment. 
## Example Usages {{ tffile (printf "examples/%s/main.tf" .Name )}} diff --git a/templates/resources.md.tmpl b/templates/resources.md.tmpl index d81c9cfb75..8b86768a70 100644 --- a/templates/resources.md.tmpl +++ b/templates/resources.md.tmpl @@ -1,7 +1,5 @@ #{{ if .Name }} {{.Type}}: {{.Name}}{{ end }} -{{ if .Name }}{{ .Description | trimspace }}{{ end }} - ## Example Usages {{ if .Name }} {{ if eq .Name "mongodbatlas_network_peering" }} diff --git a/templates/resources/push_based_log_export.md.tmpl b/templates/resources/push_based_log_export.md.tmpl index ad2634f582..aadfb9d954 100644 --- a/templates/resources/push_based_log_export.md.tmpl +++ b/templates/resources/push_based_log_export.md.tmpl @@ -1,7 +1,6 @@ # {{.Type}}: {{.Name}} -{{ .Description | trimspace }} -`mongodbatlas_push_based_log_export` provides a resource for push-based log export feature. The resource lets you configure, enable & disable the project level settings for the push-based log export feature. Using this resource you +`{{.Name}}` provides a resource for push-based log export feature. The resource lets you configure, enable & disable the project level settings for the push-based log export feature. Using this resource you can continually push logs from mongod, mongos, and audit logs to an Amazon S3 bucket. Atlas exports logs every 5 minutes. diff --git a/templates/resources/search_deployment.md.tmpl b/templates/resources/search_deployment.md.tmpl index 1e503a8982..0b6c72b40f 100644 --- a/templates/resources/search_deployment.md.tmpl +++ b/templates/resources/search_deployment.md.tmpl @@ -1,7 +1,6 @@ # {{.Type}}: {{.Name}} -{{ .Description | trimspace }} -`mongodbatlas_search_deployment` provides a Search Deployment resource. The resource lets you create, edit and delete dedicated search nodes in a cluster. +`{{.Name}}` provides a Search Deployment resource. The resource lets you create, edit and delete dedicated search nodes in a cluster. 
-> **NOTE:** For details on supported cloud providers and existing limitations you can visit the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-nodes-for-workload-isolation). From 87ca68dd4987c2d7094774facc9a09e5f0b96344 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:42:09 +0200 Subject: [PATCH 45/84] chore: Bump github.com/zclconf/go-cty from 1.14.4 to 1.15.0 (#2433) Bumps [github.com/zclconf/go-cty](https://github.com/zclconf/go-cty) from 1.14.4 to 1.15.0. - [Release notes](https://github.com/zclconf/go-cty/releases) - [Changelog](https://github.com/zclconf/go-cty/blob/main/CHANGELOG.md) - [Commits](https://github.com/zclconf/go-cty/compare/v1.14.4...v1.15.0) --- updated-dependencies: - dependency-name: github.com/zclconf/go-cty dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index eb750c90f4..a79d7689e3 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/mongodb-forks/digest v1.1.0 github.com/spf13/cast v1.6.0 github.com/stretchr/testify v1.9.0 - github.com/zclconf/go-cty v1.14.4 + github.com/zclconf/go-cty v1.15.0 go.mongodb.org/atlas v0.36.0 go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0 go.mongodb.org/realm v0.1.0 diff --git a/go.sum b/go.sum index 2d40220fdb..3c38f649e3 100644 --- a/go.sum +++ b/go.sum @@ -770,8 +770,8 @@ github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLE github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= github.com/zclconf/go-cty v1.8.2/go.mod 
h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= From b83c28ed357afb7d909642c337e27232ae6d93dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:42:32 +0200 Subject: [PATCH 46/84] chore: Bump github.com/aws/aws-sdk-go from 1.54.17 to 1.54.19 (#2432) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.54.17 to 1.54.19. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.17...v1.54.19) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a79d7689e3..7eb2b3a2e7 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 - github.com/aws/aws-sdk-go v1.54.17 + github.com/aws/aws-sdk-go v1.54.19 github.com/go-test/deep v1.1.1 github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 diff --git a/go.sum b/go.sum index 3c38f649e3..dae41f7c4e 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.54.17 h1:ZV/qwcCIhMHgsJ6iXXPVYI0s1MdLT+5LW28ClzCUPeI= -github.com/aws/aws-sdk-go v1.54.17/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= +github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= From 626a83b17038327d2afba094f3f3c546fc4b9cb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:42:57 +0200 Subject: [PATCH 47/84] chore: Bump actions/setup-go from 
5.0.1 to 5.0.2 (#2431) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.1 to 5.0.2. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/cdcb36043654635271a94b9a6d1392de5bb323a7...0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/acceptance-tests-runner.yml | 40 +++++++++---------- .../workflows/check-changelog-entry-file.yml | 2 +- .github/workflows/code-health.yml | 6 +-- .github/workflows/examples.yml | 4 +- .github/workflows/jira-release-version.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/run-script-and-commit.yml | 2 +- .github/workflows/update-sdk.yml | 2 +- 8 files changed, 30 insertions(+), 30 deletions(-) diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index bfda7bfd11..7a69c08827 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -287,7 +287,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -309,7 +309,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: 
hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -349,7 +349,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -382,7 +382,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -406,7 +406,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -428,7 +428,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -473,7 +473,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -495,7 +495,7 @@ jobs: - uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -517,7 +517,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -539,7 +539,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -577,7 +577,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -604,7 +604,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -633,7 +633,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - 
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -678,7 +678,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -715,7 +715,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -740,7 +740,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -762,7 +762,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -784,7 +784,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: 
actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -811,7 +811,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 @@ -835,7 +835,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: ref: ${{ inputs.ref || github.ref }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 diff --git a/.github/workflows/check-changelog-entry-file.yml b/.github/workflows/check-changelog-entry-file.yml index 92eabd6502..3f8f275793 100644 --- a/.github/workflows/check-changelog-entry-file.yml +++ b/.github/workflows/check-changelog-entry-file.yml @@ -14,7 +14,7 @@ jobs: permissions: {} steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - run: make check-changelog-entry-file diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index d0ddab4a18..5d70673594 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -18,7 +18,7 @@ jobs: permissions: {} steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: Build @@ 
-30,7 +30,7 @@ jobs: pull-requests: write # Needed by sticky-pull-request-comment steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: Unit Test @@ -42,7 +42,7 @@ jobs: - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' cache: false # see https://github.com/golangci/golangci-lint-action/issues/807 diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 3a80754bad..93d8b69b48 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -25,7 +25,7 @@ jobs: with: fetch-depth: 0 - run: echo "GO_VERSION=$(cat .go-version)" >> "${GITHUB_ENV}" - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: go build @@ -59,7 +59,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 with: fetch-depth: 0 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: tflint diff --git a/.github/workflows/jira-release-version.yml b/.github/workflows/jira-release-version.yml index f759115b2b..bc0c881d00 100644 --- a/.github/workflows/jira-release-version.yml +++ b/.github/workflows/jira-release-version.yml @@ -24,7 +24,7 @@ jobs: - name: Validation of version format, no pre-releases run: | echo "${{ inputs.version_number }}" | grep -P '^v\d+\.\d+\.\d+$' - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: 
actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - run: make jira-release-version diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7c82e9033e..589edb3967 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -135,7 +135,7 @@ jobs: with: ref: ${{ inputs.version_number }} - name: Set up Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: Import GPG key diff --git a/.github/workflows/run-script-and-commit.yml b/.github/workflows/run-script-and-commit.yml index d5f68124e7..e80f5b0af4 100644 --- a/.github/workflows/run-script-and-commit.yml +++ b/.github/workflows/run-script-and-commit.yml @@ -33,7 +33,7 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.apix_bot_pat }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' diff --git a/.github/workflows/update-sdk.yml b/.github/workflows/update-sdk.yml index 7a880c85a6..9ddbbb5059 100644 --- a/.github/workflows/update-sdk.yml +++ b/.github/workflows/update-sdk.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: Update files From fae643ded63392e9a46cf944610823584e7e74c6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jul 2024 08:43:33 +0200 Subject: [PATCH 48/84] chore: Bump tj-actions/verify-changed-files (#2430) Bumps [tj-actions/verify-changed-files](https://github.com/tj-actions/verify-changed-files) from 11ea2b36f98609331b8dc9c5ad9071ee317c6d28 to 
79f398ac63ab46f7f820470c821d830e5c340ef9. - [Release notes](https://github.com/tj-actions/verify-changed-files/releases) - [Changelog](https://github.com/tj-actions/verify-changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/verify-changed-files/compare/11ea2b36f98609331b8dc9c5ad9071ee317c6d28...79f398ac63ab46f7f820470c821d830e5c340ef9) --- updated-dependencies: - dependency-name: tj-actions/verify-changed-files dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/update-sdk.yml | 2 +- .github/workflows/update_tf_compatibility_matrix.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-sdk.yml b/.github/workflows/update-sdk.yml index 9ddbbb5059..5bf2477337 100644 --- a/.github/workflows/update-sdk.yml +++ b/.github/workflows/update-sdk.yml @@ -20,7 +20,7 @@ jobs: - name: Update files run: make tools update-atlas-sdk - name: Verify Changed files - uses: tj-actions/verify-changed-files@11ea2b36f98609331b8dc9c5ad9071ee317c6d28 + uses: tj-actions/verify-changed-files@79f398ac63ab46f7f820470c821d830e5c340ef9 id: verify-changed-files - name: Create PR uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c diff --git a/.github/workflows/update_tf_compatibility_matrix.yml b/.github/workflows/update_tf_compatibility_matrix.yml index ffa81d48e9..d562f41bdc 100644 --- a/.github/workflows/update_tf_compatibility_matrix.yml +++ b/.github/workflows/update_tf_compatibility_matrix.yml @@ -19,7 +19,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: make update-tf-compatibility-matrix - name: Verify Changed files - uses: tj-actions/verify-changed-files@11ea2b36f98609331b8dc9c5ad9071ee317c6d28 + uses: tj-actions/verify-changed-files@79f398ac63ab46f7f820470c821d830e5c340ef9 id: verify-changed-files - name: Create PR uses: 
peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c From bd01af7291e544ac34eb34509cd21620abf57c6a Mon Sep 17 00:00:00 2001 From: Espen Albert Date: Wed, 17 Jul 2024 09:02:30 +0100 Subject: [PATCH 49/84] refactor: avoid usage of github.com/go-test/deep (use `reflect.DeepEqual instead`) (#2427) --- go.mod | 1 - internal/common/conversion/encode_state_test.go | 10 +++++----- .../model_cloud_backup_snapshot_test.go | 6 +++--- .../service/eventtrigger/resource_event_trigger.go | 8 +++----- internal/service/searchindex/model_search_index.go | 5 ++--- 5 files changed, 13 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 7eb2b3a2e7..8172cf6b8b 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.22 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 github.com/aws/aws-sdk-go v1.54.19 - github.com/go-test/deep v1.1.1 github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-version v1.7.0 diff --git a/internal/common/conversion/encode_state_test.go b/internal/common/conversion/encode_state_test.go index 94628c994f..1b3e75c239 100644 --- a/internal/common/conversion/encode_state_test.go +++ b/internal/common/conversion/encode_state_test.go @@ -1,9 +1,9 @@ package conversion_test import ( + "reflect" "testing" - "github.com/go-test/deep" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" ) @@ -16,8 +16,8 @@ func TestEncodeDecodeID(t *testing.T) { got := conversion.DecodeStateID(conversion.EncodeStateID(expected)) - if diff := deep.Equal(expected, got); diff != nil { - t.Fatalf("Bad testEncodeDecodeID return \n got = %#v\nwant = %#v \ndiff = %#v", got, expected, diff) + if !reflect.DeepEqual(expected, got) { + t.Fatalf("Bad testEncodeDecodeID return \n got = %#v\nwant = %#v", got, expected) } } @@ -28,7 +28,7 @@ func TestDecodeID(t *testing.T) { got := 
conversion.DecodeStateID(expected) got2 := conversion.DecodeStateID(expected2) - if diff := deep.Equal(got, got2); diff != nil { - t.Fatalf("Bad TestDecodeID return \n got = %#v\nwant = %#v \ndiff = %#v", got, got2, diff) + if !reflect.DeepEqual(got, got2) { + t.Fatalf("Bad TestDecodeID return \n got = %#v\nwant = %#v", got, got2) } } diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go index 2279919f71..269e98010e 100644 --- a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go @@ -1,9 +1,9 @@ package cloudbackupsnapshot_test import ( + "reflect" "testing" - "github.com/go-test/deep" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupsnapshot" "go.mongodb.org/atlas-sdk/v20240530002/admin" ) @@ -20,8 +20,8 @@ func TestSplitSnapshotImportID(t *testing.T) { SnapshotId: "5cf5a45a9ccf6400e60981b7", } - if diff := deep.Equal(expected, got); diff != nil { - t.Errorf("Bad splitSnapshotImportID return \n got = %#v\nwant = %#v \ndiff = %#v", expected, *got, diff) + if !reflect.DeepEqual(expected, got) { + t.Errorf("Bad splitSnapshotImportID return \n got = %#v\nwant = %#v", expected, *got) } if _, err := cloudbackupsnapshot.SplitSnapshotImportID("5cf5a45a9ccf6400e60981b6projectname-environment-mongo-global-cluster5cf5a45a9ccf6400e60981b7"); err == nil { diff --git a/internal/service/eventtrigger/resource_event_trigger.go b/internal/service/eventtrigger/resource_event_trigger.go index 2a91685775..0aa40f2b27 100644 --- a/internal/service/eventtrigger/resource_event_trigger.go +++ b/internal/service/eventtrigger/resource_event_trigger.go @@ -7,9 +7,9 @@ import ( "fmt" "log" "net/http" + "reflect" "strings" - "github.com/go-test/deep" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -118,8 +118,7 @@ func Resource() *schema.Resource { log.Printf("[ERROR] json.Unmarshal %v", err) return false } - if diff := deep.Equal(&j, &j2); diff != nil { - log.Printf("[DEBUG] deep equal not passed: %v", diff) + if !reflect.DeepEqual(&j, &j2) { return false } @@ -140,8 +139,7 @@ func Resource() *schema.Resource { log.Printf("[ERROR] json.Unmarshal %v", err) return false } - if diff := deep.Equal(&j, &j2); diff != nil { - log.Printf("[DEBUG] deep equal not passed: %v", diff) + if !reflect.DeepEqual(&j, &j2) { return false } diff --git a/internal/service/searchindex/model_search_index.go b/internal/service/searchindex/model_search_index.go index 4fcb07b7a8..6b5adfbbb4 100644 --- a/internal/service/searchindex/model_search_index.go +++ b/internal/service/searchindex/model_search_index.go @@ -5,9 +5,9 @@ import ( "context" "encoding/json" "log" + "reflect" "strconv" - "github.com/go-test/deep" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -131,8 +131,7 @@ func diffSuppressJSON(k, old, newStr string, d *schema.ResourceData) bool { if err := json.Unmarshal([]byte(newStr), &j2); err != nil { log.Printf("[ERROR] cannot unmarshal new search index analyzer json %v", err) } - if diff := deep.Equal(&j, &j2); diff != nil { - log.Printf("[DEBUG] deep equal not passed: %v", diff) + if !reflect.DeepEqual(&j, &j2) { return false } From 214b6944170a18c2ceeeb8a2c4ac14d171abff40 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Wed, 17 Jul 2024 11:52:02 +0200 Subject: [PATCH 50/84] chore: Deletes modules folder (#2435) * remove modules folder * gitignore --- .gitignore | 1 - modules/examples/atlas-basic/main.tf | 23 -- modules/examples/atlas-basic/versions.tf | 10 - modules/examples/sagemaker/main.tf | 26 -- 
modules/examples/sagemaker/versions.tf | 10 - .../README.md | 21 -- .../outputs.tf | 10 - .../sagemaker.tf | 280 ------------------ .../variables.tf | 75 ----- .../versions.tf | 13 - .../terraform-mongodbatlas-basic/README.md | 42 --- .../terraform-mongodbatlas-basic/aws-vpc.tf | 59 ---- modules/terraform-mongodbatlas-basic/main.tf | 114 ------- .../terraform-mongodbatlas-basic/outputs.tf | 1 - .../terraform-mongodbatlas-basic/variables.tf | 217 -------------- .../terraform-mongodbatlas-basic/versions.tf | 14 - 16 files changed, 916 deletions(-) delete mode 100644 modules/examples/atlas-basic/main.tf delete mode 100644 modules/examples/atlas-basic/versions.tf delete mode 100644 modules/examples/sagemaker/main.tf delete mode 100644 modules/examples/sagemaker/versions.tf delete mode 100644 modules/terraform-mongodbatlas-amazon-sagemaker-integration/README.md delete mode 100644 modules/terraform-mongodbatlas-amazon-sagemaker-integration/outputs.tf delete mode 100644 modules/terraform-mongodbatlas-amazon-sagemaker-integration/sagemaker.tf delete mode 100644 modules/terraform-mongodbatlas-amazon-sagemaker-integration/variables.tf delete mode 100644 modules/terraform-mongodbatlas-amazon-sagemaker-integration/versions.tf delete mode 100644 modules/terraform-mongodbatlas-basic/README.md delete mode 100644 modules/terraform-mongodbatlas-basic/aws-vpc.tf delete mode 100644 modules/terraform-mongodbatlas-basic/main.tf delete mode 100644 modules/terraform-mongodbatlas-basic/outputs.tf delete mode 100644 modules/terraform-mongodbatlas-basic/variables.tf delete mode 100644 modules/terraform-mongodbatlas-basic/versions.tf diff --git a/.gitignore b/.gitignore index 059f2a8a95..d359912656 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,6 @@ terraform.tfplan terraform.tfstate .terraform.lock.hcl bin/ -modules-dev/ /pkg/ .vagrant/ *.backup diff --git a/modules/examples/atlas-basic/main.tf b/modules/examples/atlas-basic/main.tf deleted file mode 100644 index 1fb35f72a0..0000000000 
--- a/modules/examples/atlas-basic/main.tf +++ /dev/null @@ -1,23 +0,0 @@ -module "atlas-basic" { - source = "../../terraform-mongodbatlas-basic" - - public_key = "" - private_key = "" - atlas_org_id = "" - - database_name = ["test1","test2"] - db_users = ["user1","user2"] - db_passwords = ["",""] - database_names = ["test-db1","test-db2"] - region = "US_EAST_1" - - aws_vpc_cidr_block = "1.0.0.0/16" - aws_vpc_egress = "0.0.0.0/0" - aws_vpc_ingress = "0.0.0.0/0" - aws_subnet_cidr_block1 = "1.0.1.0/24" - aws_subnet_cidr_block2 = "1.0.2.0/24" - - cidr_block = ["10.1.0.0/16","12.2.0.0/16"] - ip_address = ["208.169.90.207","63.167.210.250"] - -} \ No newline at end of file diff --git a/modules/examples/atlas-basic/versions.tf b/modules/examples/atlas-basic/versions.tf deleted file mode 100644 index 1d70a22799..0000000000 --- a/modules/examples/atlas-basic/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 5.0" - } - } -} \ No newline at end of file diff --git a/modules/examples/sagemaker/main.tf b/modules/examples/sagemaker/main.tf deleted file mode 100644 index f295a7684a..0000000000 --- a/modules/examples/sagemaker/main.tf +++ /dev/null @@ -1,26 +0,0 @@ - -# NOTE: -# go through the sagemaker-example/README.md file to create prerequisites and pass the inputs for the below - - -module "mongodb-atlas-analytics-amazon-sagemaker-integration" { - source = "../../terraform-mongodbatlas-amazon-sagemaker-integration" - - public_key = "" - private_key = "" - atlas_org_id = "" - - atlas_project_id = "" - realm_app_id = "" - database_name = "" - collection_name = "" - service_id = "" - - trigger_name = "" - - model_ecr_image_uri = "" - pull_lambda_ecr_image_uri = "" - model_data_s3_uri = "" - push_lambda_ecr_image_uri = "" - mongo_endpoint = "" -} diff --git a/modules/examples/sagemaker/versions.tf b/modules/examples/sagemaker/versions.tf deleted file mode 100644 index 
1d70a22799..0000000000 --- a/modules/examples/sagemaker/versions.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 5.0" - } - } -} \ No newline at end of file diff --git a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/README.md b/modules/terraform-mongodbatlas-amazon-sagemaker-integration/README.md deleted file mode 100644 index 5f545d0150..0000000000 --- a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# quickstart-mongodb-atlas-analytics-amazon-sagemaker-integration - -## Overview - -![simple-quickstart-arch](https://user-images.githubusercontent.com/5663078/229119386-0dbc6e30-a060-465e-86dd-f89712b0fc49.png) - -This Partner Solutions template enables you to begin working with your machine learning models using MongoDB Atlas Cluster and Amazon SageMaker endpoints. With this template, you can utilize MongoDB as a data source and SageMaker for data analysis, streamlining the process of building and deploying machine learning models. - - -## MongoDB Atlas terraform Resources used by the templates - -- [mongodbatlas_event_trigger](../../mongodbatlas/data_source_mongodbatlas_event_trigger.go) - - -## Environment Configured by the Partner Solutions template -The Partner Solutions template will generate and configure the following resources: - - a [MongoDB Partner Event Bus](http://mongodb.com/docs/atlas/app-services/triggers/aws-eventbridge/#std-label-aws-eventbridge) - - a [database trigger](https://www.mongodb.com/docs/atlas/app-services/triggers/database-triggers/) with your Atlas Cluster - - lambda functions to run the machine learning model and send the classification results to your MongoDB Atlas Cluster. 
(See [iris_classifier](https://github.com/mongodb/mongodbatlas-cloudformation-resources/tree/master/examples/quickstart-mongodb-atlas-analytics-amazon-sagemaker-integration/sagemaker-example/iris_classifier) for an example of machine learning model to use with this template. See [lambda_functions](https://github.com/mongodb/mongodbatlas-cloudformation-resources/tree/master/examples/quickstart-mongodb-atlas-analytics-amazon-sagemaker-integration/sagemaker-example/lambda_functions) for an example of lambda functions to use to read and write data to your MongoDB Atlas cluster.) - - diff --git a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/outputs.tf b/modules/terraform-mongodbatlas-amazon-sagemaker-integration/outputs.tf deleted file mode 100644 index d19a8d32b1..0000000000 --- a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/outputs.tf +++ /dev/null @@ -1,10 +0,0 @@ - -output "sage_maker_endpoint_arn" { - description = "SageMaker endpoint ARN" - value = aws_sagemaker_endpoint.endpoint.arn -} - -output "event_bus_name" { - description = "Event Bus Name" - value = aws_cloudwatch_event_bus.event_bus_for_capturing_mdb_events.arn -} diff --git a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/sagemaker.tf b/modules/terraform-mongodbatlas-amazon-sagemaker-integration/sagemaker.tf deleted file mode 100644 index 6a8ac985ca..0000000000 --- a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/sagemaker.tf +++ /dev/null @@ -1,280 +0,0 @@ -provider "mongodbatlas" { - public_key = var.public_key - private_key = var.private_key -} - -data "aws_partition" "current" {} - -data "aws_region" "current" {} - -data "aws_caller_identity" "current" {} - - -resource "mongodbatlas_event_trigger" "trigger" { - project_id = var.atlas_project_id - name = var.trigger_name - type = "DATABASE" - app_id = var.realm_app_id - - config_database= var.database_name - config_collection = var.collection_name - config_operation_types = ["INSERT"] - 
config_service_id = var.service_id - config_full_document = true - - event_processors { - aws_eventbridge { - config_region = data.aws_region.current.name - config_account_id = data.aws_caller_identity.current.account_id - } - } -} - -resource "aws_iam_role" "sage_maker_execution_role" { - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Principal = { - Service = [ - "sagemaker.amazonaws.com" - ] - } - Action = [ - "sts:AssumeRole" - ] - } - ] - }) - path = "/" - managed_policy_arns = [ - "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSageMakerFullAccess", - "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSageMakerCanvasFullAccess" - ] - - inline_policy { - name = "qs-sagemaker-execution-policy" - policy = jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Effect = "Allow", - Action = "s3:GetObject", - Resource = "arn:${data.aws_partition.current.partition}:s3:::*" - } - ] - }) - } -} - -resource "aws_sagemaker_model" "model" { - primary_container { - image = var.model_ecr_image_uri - model_data_url = var.model_data_s3_uri - mode = "SingleModel" - environment = { - SAGEMAKER_PROGRAM = "inference.py" - SAGEMAKER_SUBMIT_DIRECTORY = var.model_data_s3_uri - } - } - execution_role_arn = aws_iam_role.sage_maker_execution_role.arn -} - -resource "aws_sagemaker_endpoint_configuration" "endpoint_config" { - production_variants { - initial_instance_count = 1 - initial_variant_weight = 1.0 - instance_type = "ml.c5.large" - model_name = aws_sagemaker_model.model.name - variant_name = aws_sagemaker_model.model.name - } -} - -resource "aws_sagemaker_endpoint" "endpoint" { - endpoint_config_name = aws_sagemaker_endpoint_configuration.endpoint_config.name -} - -resource "aws_cloudwatch_event_bus" "event_bus_for_capturing_mdb_events" { - depends_on = [ mongodbatlas_event_trigger.trigger ] - event_source_name = 
"aws.partner/mongodb.com/stitch.trigger/${mongodbatlas_event_trigger.trigger.trigger_id}" - name = "aws.partner/mongodb.com/stitch.trigger/${mongodbatlas_event_trigger.trigger.trigger_id}" -} - -resource "aws_cloudwatch_event_bus" "event_bus_for_sage_maker_results" { - name = "qs-mongodb-sagemaker-results" -} - -resource "aws_lambda_function" "lambda_function_to_read_mdb_events" { - function_name = "pull-mdb-events" - package_type = "Image" - image_uri = var.pull_lambda_ecr_image_uri - role = aws_iam_role.pull_lambda_function_role.arn - environment { - variables = { - model_endpoint = aws_sagemaker_endpoint.endpoint.name - region_name = data.aws_region.current.name - eventbus_name = aws_cloudwatch_event_bus.event_bus_for_sage_maker_results.arn - } - } - architectures = [ - "x86_64" - ] - memory_size = 1024 - timeout = 300 -} - -resource "aws_cloudwatch_event_rule" "event_rule_to_match_mdb_events" { - description = "Event Rule to match MongoDB change events." - event_bus_name = aws_cloudwatch_event_bus.event_bus_for_capturing_mdb_events.name - event_pattern = jsonencode({ - account = [ - data.aws_caller_identity.current.account_id - ] - }) - is_enabled = true - name = "pull-mdb-events" -} - -resource "aws_cloudwatch_event_target" "read_mdb_event_target" { - event_bus_name = aws_cloudwatch_event_bus.event_bus_for_capturing_mdb_events.name - rule = aws_cloudwatch_event_rule.event_rule_to_match_mdb_events.name - target_id = "EventRuleToReadMatchMDBEventsID" - arn = aws_lambda_function.lambda_function_to_read_mdb_events.arn -} - -resource "aws_iam_role" "pull_lambda_function_role" { - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Principal = { - Service = [ - "lambda.amazonaws.com" - ] - } - Action = [ - "sts:AssumeRole" - ] - } - ] - }) - path = "/" - managed_policy_arns = [ - "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - ] - inline_policy { - name = 
"sagemaker-endpoint-invokation-policy" - policy = jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Effect = "Allow", - Action = "sagemaker:InvokeEndpoint", - Resource = aws_sagemaker_endpoint.endpoint.arn - }, - { - Effect = "Allow", - Action = "events:PutEvents", - Resource = aws_cloudwatch_event_bus.event_bus_for_sage_maker_results.arn - } - ] - }) - } -} - -resource "aws_lambda_function" "lambda_function_to_write_to_mdb" { - function_name = "push_lambda_function" - package_type = "Image" - role = aws_iam_role.push_lambda_function_role.arn - image_uri = var.push_lambda_ecr_image_uri - environment { - variables = { - mongo_endpoint = var.mongo_endpoint - dbname = var.database_name - } - } - architectures = [ - "x86_64" - ] - memory_size = 1024 - timeout = 300 -} - -resource "aws_iam_role" "push_lambda_function_role" { - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Principal = { - Service = [ - "lambda.amazonaws.com" - ] - } - Action = [ - "sts:AssumeRole" - ] - } - ] - }) - path = "/" - managed_policy_arns = [ - "arn:${data.aws_partition.current.partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" - ] - inline_policy { - name = "sagemaker-endpoint-invokation-policy" - policy = jsonencode({ - Version = "2012-10-17", - Statement = [ - { - Effect = "Allow", - Action = "sagemaker:InvokeEndpoint", - Resource = aws_sagemaker_endpoint.endpoint.arn - }, - { - Effect = "Allow", - Action = "events:PutEvents", - Resource = aws_cloudwatch_event_rule.event_rule_to_match_mdb_events.arn - } - ] - }) - } -} - -resource "aws_cloudwatch_event_rule" "event_rule_to_capture_events_sent_from_lambda_function" { - description = "Event Rule to match result events returned by pull Lambda." 
- event_bus_name = aws_cloudwatch_event_bus.event_bus_for_sage_maker_results.name - event_pattern = jsonencode({ - source = [ - "user-event" - ] - detail-type = [ - "user-preferences" - ] - }) - is_enabled = true - name = "push-to-mongodb" -} - -resource "aws_cloudwatch_event_target" "write_event_from_lambda_to_target" { - event_bus_name = aws_cloudwatch_event_bus.event_bus_for_sage_maker_results.name - rule = aws_cloudwatch_event_rule.event_rule_to_capture_events_sent_from_lambda_function.name - target_id = "EventRuleToCaptureEventsSentFromLambdaFunctionID" - arn = aws_lambda_function.lambda_function_to_write_to_mdb.arn -} - -resource "aws_lambda_permission" "event_bridge_lambda_permission1" { - function_name = aws_lambda_function.lambda_function_to_read_mdb_events.arn - action = "lambda:InvokeFunction" - principal = "events.amazonaws.com" - source_arn = aws_cloudwatch_event_rule.event_rule_to_match_mdb_events.arn -} - -resource "aws_lambda_permission" "event_bridge_lambda_permission2" { - function_name = aws_lambda_function.lambda_function_to_write_to_mdb.arn - action = "lambda:InvokeFunction" - principal = "events.amazonaws.com" - source_arn = aws_cloudwatch_event_rule.event_rule_to_capture_events_sent_from_lambda_function.arn -} \ No newline at end of file diff --git a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/variables.tf b/modules/terraform-mongodbatlas-amazon-sagemaker-integration/variables.tf deleted file mode 100644 index e788272a9b..0000000000 --- a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/variables.tf +++ /dev/null @@ -1,75 +0,0 @@ -variable "atlas_org_id" { - description = "Atlas organization id" - type = string -} -variable "public_key" { - description = "Public API key to authenticate to Atlas" - type = string -} -variable "private_key" { - description = "Private API key to authenticate to Atlas" - type = string -} - - -variable profile { - description = "A secret with name cfn/atlas/profile/{Profile}" - default = 
"default" - type = string -} - -variable atlas_project_id { - description = "Atlas Project ID." - type = string -} - -variable database_name { - description = "Database name for the trigger." - type = string -} - -variable collection_name { - description = "Collection name for the trigger." - type = string -} - -variable service_id { - description = "Service ID." - type = string -} - -variable realm_app_id { - description = "Realm App ID." - type = string -} - -variable model_data_s3_uri { - description = "The S3 path where the model artifacts, which result from model training, are stored. This path must point to a single gzip compressed tar archive (.tar.gz suffix)." - type = string -} - -variable model_ecr_image_uri { - description = "AWS managed Deep Learning Container Image URI or your custom Image URI from ECR to deploy and run the model." - type = string -} - -variable pull_lambda_ecr_image_uri { - description = "ECR image URI of the Lambda function to read MongoDB events from EventBridge." - type = string -} - -variable push_lambda_ecr_image_uri { - description = "ECR image URI of the Lambda function to write results back to MongoDB." - type = string -} - -variable mongo_endpoint { - description = "Your MongoDB endpoint to push results by Lambda function." 
- type = string -} - -variable "trigger_name" { - description = "value of trigger name" - type = string - -} diff --git a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/versions.tf b/modules/terraform-mongodbatlas-amazon-sagemaker-integration/versions.tf deleted file mode 100644 index 68b0b35a68..0000000000 --- a/modules/terraform-mongodbatlas-amazon-sagemaker-integration/versions.tf +++ /dev/null @@ -1,13 +0,0 @@ -terraform { - required_providers { - mongodbatlas = { - source = "mongodb/mongodbatlas" - version = "1.12.1" - } - aws = { - source = "hashicorp/aws" - version = "~> 5.17.0" - } - } - required_version = ">= 0.13" -} \ No newline at end of file diff --git a/modules/terraform-mongodbatlas-basic/README.md b/modules/terraform-mongodbatlas-basic/README.md deleted file mode 100644 index ff2e72d91b..0000000000 --- a/modules/terraform-mongodbatlas-basic/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# quickstart-mongodb-atlas - - - -## Overview - -![image](https://user-images.githubusercontent.com/5663078/229103723-4c6b9ab1-9492-47ba-b04d-7f29079e3817.png) - -The Atlas Partner Solutions templates allow you to set up all you need to start using MongoDB Atlas. We provide four different templates: - -- Deploy MongoDB Atlas without VPC peering. This option peers MongoDB Atlas with your existing VPC. -- Deploy MongoDB Atlas with VPC peering into a new VPC (end-to-end deployment). This option builds a complete MongoDB Atlas environment within AWS consisting of a project, cluster, and more. -- Deploy MongoDB Atlas with VPC peering into an existing VPC. This option peers MongoDB Atlas with a new VPC. -- Deploy MongoDB Atlas with Private Endpoint. This option connects MongoDB Atlas AWS VPC using Private Endpoint. - -All the quickstart templates create an Atlas Project, Cluster, Database User and enable public access into your cluster. 
- - - -## MongoDB Atlas CFN Resources used by the templates - -- [MongoDB::Atlas::Cluster](../../mongodbatlas/resource_mongodbatlas_cluster.go) -- [MongoDB::Atlas::ProjectIpAccessList](../../mongodbatlas/fw_resource_mongodbatlas_project_ip_access_list.go) -- [MongoDB::Atlas::DatabaseUser](../../mongodbatlas/fw_resource_mongodbatlas_database_user.go) -- [MongoDB::Atlas::Project](../../mongodbatlas/fw_resource_mongodbatlas_project.go) -- [MongoDB::Atlas::NetworkPeering](../../mongodbatlas/resource_mongodbatlas_network_peering.go) -- [MongoDB::Atlas::NetworkContainer](../../mongodbatlas/resource_mongodbatlas_network_container.go) -- [MongoDB::Atlas::PrivateEndpoint](../../mongodbatlas/resource_mongodbatlas_privatelink_endpoint.go) - - -## Environment Configured by the Partner Solution templates -All Partner Solutions templates will generate the following resources: -- An Atlas Project in the organization that was provided as input. -- An Atlas Cluster with authentication and authorization enabled, which cannot be accessed through the public internet. -- A Database user that can access the cluster. -- The IP address range provided as input will be added to the Atlas access list, allowing the cluster to be accessed through the public internet. - -The specific resources that will be created depend on which Partner Solutions template is used: - -- A new AWS VPC (Virtual Private Cloud) will be created. -- A VPC peering connection will be established between the MongoDB Atlas VPC (where your cluster is located) and the VPC on AWS. 
- diff --git a/modules/terraform-mongodbatlas-basic/aws-vpc.tf b/modules/terraform-mongodbatlas-basic/aws-vpc.tf deleted file mode 100644 index 6932444053..0000000000 --- a/modules/terraform-mongodbatlas-basic/aws-vpc.tf +++ /dev/null @@ -1,59 +0,0 @@ -resource "aws_vpc_endpoint" "vpce_east" { - vpc_id = aws_vpc.vpc_east.id - service_name = mongodbatlas_privatelink_endpoint.pe_east.endpoint_service_name - vpc_endpoint_type = "Interface" - subnet_ids = [aws_subnet.subnet_east_a.id, aws_subnet.subnet_east_b.id] - security_group_ids = [aws_security_group.sg_east.id] -} - -resource "aws_vpc" "vpc_east" { - cidr_block = var.aws_vpc_cidr_block - enable_dns_hostnames = true - enable_dns_support = true -} - -resource "aws_internet_gateway" "ig_east" { - vpc_id = aws_vpc.vpc_east.id -} - -resource "aws_route" "route_east" { - route_table_id = aws_vpc.vpc_east.main_route_table_id - destination_cidr_block = var.aws_route_table_cidr_block - gateway_id = aws_internet_gateway.ig_east.id -} - -resource "aws_subnet" "subnet_east_a" { - vpc_id = aws_vpc.vpc_east.id - cidr_block = var.aws_subnet_cidr_block1 - map_public_ip_on_launch = true - availability_zone = var.aws_subnet_availability_zone1 -} - -resource "aws_subnet" "subnet_east_b" { - vpc_id = aws_vpc.vpc_east.id - cidr_block = var.aws_subnet_cidr_block2 - map_public_ip_on_launch = false - availability_zone = var.aws_subnet_availability_zone2 -} - -resource "aws_security_group" "sg_east" { - name_prefix = "default-" - description = "Default security group for all instances in vpc" - vpc_id = aws_vpc.vpc_east.id - ingress { - from_port = var.aws_sg_ingress_from_port - to_port = var.aws_sg_ingress_to_port - protocol = var.aws_sg_ingress_protocol - cidr_blocks = [ - var.aws_vpc_cidr_block, - ] - } - egress { - from_port = var.aws_sg_egress_from_port - to_port = var.aws_sg_egress_to_port - protocol = var.aws_sg_egress_protocol - cidr_blocks = [ - var.aws_vpc_cidr_block - ] - } -} diff --git 
a/modules/terraform-mongodbatlas-basic/main.tf b/modules/terraform-mongodbatlas-basic/main.tf deleted file mode 100644 index a2e222513b..0000000000 --- a/modules/terraform-mongodbatlas-basic/main.tf +++ /dev/null @@ -1,114 +0,0 @@ -provider "mongodbatlas" { - public_key = var.public_key - private_key = var.private_key -} -locals { - ip_address_list = [ - for ip in var.ip_address : - { - ip_address = ip - comment = "IP Address ${ip}" - } - ] - - cidr_block_list = [ - for cidr in var.cidr_block : - { - cidr_block = cidr - comment = "CIDR Block ${cidr}" - } - ] -} - -# Project Resource -resource "mongodbatlas_project" "project" { - name = var.project_name - org_id = var.atlas_org_id -} - - -# IP Access List with IP Address -resource "mongodbatlas_project_ip_access_list" "ip" { - for_each = { - for index, ip in local.ip_address_list : - ip.comment => ip - } - project_id =mongodbatlas_project.project.id - ip_address = each.value.ip_address - comment = each.value.comment -} - -# IP Access List with CIDR Block -resource "mongodbatlas_project_ip_access_list" "cidr" { - - for_each = { - for index, cidr in local.cidr_block_list : - cidr.comment => cidr - } - project_id =mongodbatlas_project.project.id - cidr_block = each.value.cidr_block - comment = each.value.comment -} - -resource "mongodbatlas_cluster" "cluster" { - project_id = mongodbatlas_project.project.id - name = var.cluster_name - mongo_db_major_version = var.mongo_version - cluster_type = var.cluster_type - replication_specs { - num_shards = var.num_shards - regions_config { - region_name = var.region - electable_nodes = var.electable_nodes - priority = var.priority - read_only_nodes = var.read_only_nodes - } - } - # Provider Settings "block" - auto_scaling_disk_gb_enabled = var.auto_scaling_disk_gb_enabled - provider_name = var.provider_name - disk_size_gb = var.disk_size_gb - provider_instance_size_name = var.provider_instance_size_name -} - -# DATABASE USER -resource "mongodbatlas_database_user" "user" { - 
count = length(var.db_users) - username = var.db_users[count.index] - password = var.db_passwords[count.index] - project_id = mongodbatlas_project.project.id - auth_database_name = "admin" - - roles { - role_name = var.role_name - database_name = var.database_names[count.index] - } - - labels { - key = "Name" - value = var.database_names[count.index] - } - - scopes { - name = mongodbatlas_cluster.cluster.name - type = "CLUSTER" - } -} - -resource "mongodbatlas_privatelink_endpoint" "pe_east" { - project_id = mongodbatlas_project.project.id - provider_name = var.provider_name - region = var.aws_region -} - -resource "mongodbatlas_privatelink_endpoint_service" "pe_east_service" { - project_id = mongodbatlas_project.project.id - private_link_id = mongodbatlas_privatelink_endpoint.pe_east.private_link_id - endpoint_service_id = aws_vpc_endpoint.vpce_east.id - provider_name = var.provider_name -} - - -output "project_id" { - value = mongodbatlas_project.project.id -} \ No newline at end of file diff --git a/modules/terraform-mongodbatlas-basic/outputs.tf b/modules/terraform-mongodbatlas-basic/outputs.tf deleted file mode 100644 index 8b13789179..0000000000 --- a/modules/terraform-mongodbatlas-basic/outputs.tf +++ /dev/null @@ -1 +0,0 @@ - diff --git a/modules/terraform-mongodbatlas-basic/variables.tf b/modules/terraform-mongodbatlas-basic/variables.tf deleted file mode 100644 index 871ba46898..0000000000 --- a/modules/terraform-mongodbatlas-basic/variables.tf +++ /dev/null @@ -1,217 +0,0 @@ -variable "atlas_org_id" { - description = "Atlas organization id" - type = string -} -variable "public_key" { - description = "Public API key to authenticate to Atlas" - type = string -} -variable "private_key" { - description = "Private API key to authenticate to Atlas" - type = string -} - -# project -variable "project_name" { - description = "Atlas project name" - default = "TenantUpgradeTest" - type = string -} - -#cluster -variable "cluster_name" { - description = "Atlas 
cluster name" - default = "cluster" - type = string -} - -variable "cluster_type" { - description = "Atlas cluster type" - default = "REPLICASET" - type = string -} - -variable "num_shards" { - description = "Atlas cluster number of shards" - default = 1 - type = number -} - -variable "priority" { - description = "Atlas cluster priority" - default = 7 - type = number -} - -variable "read_only_nodes" { - description = "Atlas cluster number of read only nodes" - default = 0 - type = number -} -variable "electable_nodes" { - description = "Atlas cluster number of electable nodes" - default = 3 - type = number -} - -variable "auto_scaling_disk_gb_enabled" { - description = "Atlas cluster auto scaling disk enabled" - default = false - type = bool -} - -variable "disk_size_gb" { - description = "Atlas cluster disk size in GB" - default = 10 - type = number -} -variable "provider_name" { - description = "Atlas cluster provider name" - default = "AWS" - type = string -} -variable "backing_provider_name" { - description = "Atlas cluster backing provider name" - default = "AWS" - type = string -} -variable "provider_instance_size_name" { - description = "Atlas cluster provider instance name" - default = "M10" - type = string -} - -variable "region" { - description = "Atlas cluster region" - default = "US_EAST_1" - type = string -} -variable "aws_region"{ - description = "AWS region" - default = "us-east-1" - type = string -} - -variable "mongo_version" { - description = "Atlas cluster version" - default = "4.4" - type = string -} - - -variable "user" { - description = "MongoDB Atlas User" - type = list(string) - default = ["dbuser1", "dbuser2"] -} -variable "db_passwords" { - description = "MongoDB Atlas User Password" - type = list(string) -} -variable "database_names" { - description = "The Database in the cluster" - type = list(string) -} - -# database user -variable "role_name" { - description = "Atlas database user role name" - default = "readWrite" - type = string -} - 
-# IP Access List -variable "cidr_block" { - description = "IP Access List CIDRs" - type = list(string) -} - -variable "ip_address" { - description = "IP Access List IP Addresses" - type = list(string) -} -# aws - -variable "aws_vpc_cidr_block" { - description = "AWS VPC CIDR block" - default = "10.0.0.0/16" - type = string -} - -# aws vpc -variable "aws_vpc_ingress" { - description = "AWS VPC ingress CIDR block" - type = string -} - -variable "aws_vpc_egress" { - description = "AWS VPC egress CIDR block" - type = string -} - -variable "aws_route_table_cidr_block" { - description = "AWS route table CIDR block" - default = "0.0.0.0/0" - type = string -} - -variable "aws_subnet_cidr_block1" { - description = "AWS subnet CIDR block" - type = string -} -variable "aws_subnet_cidr_block2" { - description = "AWS subnet CIDR block" - type = string -} - -variable "aws_subnet_availability_zone1" { - description = "AWS subnet availability zone" - default = "us-east-1a" - type = string -} -variable "aws_subnet_availability_zone2" { - description = "AWS subnet availability zone" - default = "us-east-1b" - type = string -} - -variable "aws_sg_ingress_from_port" { - description = "AWS security group ingress from port" - default = 27017 - type = number -} - -variable "aws_sg_ingress_to_port" { - description = "AWS security group ingress to port" - default = 27017 - type = number -} - -variable "aws_sg_ingress_protocol" { - description = "AWS security group ingress protocol" - default = "tcp" - type = string -} - -variable "aws_sg_egress_from_port" { - description = "AWS security group egress from port" - default = 0 - type = number -} - -variable "aws_sg_egress_to_port" { - description = "AWS security group egress to port" - default = 0 - type = number -} - -variable "aws_sg_egress_protocol" { - description = "AWS security group egress protocol" - default = "-1" - type = string -} - -variable "db_users" { - description = "Atlas database users" - type = list(string) -} \ No newline 
at end of file diff --git a/modules/terraform-mongodbatlas-basic/versions.tf b/modules/terraform-mongodbatlas-basic/versions.tf deleted file mode 100644 index 051942514f..0000000000 --- a/modules/terraform-mongodbatlas-basic/versions.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_providers { - mongodbatlas = { - source = "mongodb/mongodbatlas" - version = "1.12.1" - } - aws = { - source = "hashicorp/aws" - version = "~> 5.0" - } - } - required_version = ">= 0.13" -} - From de0ab51ba974b5736b37f23b0ae0edd65e499bbe Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 18 Jul 2024 10:22:31 +0200 Subject: [PATCH 51/84] chore: Makes sure doc generation is up-to-date (#2441) * generate doc * split in runs * detect changes * TEMPORARY: change 3 files to trigger doc failures * rename * Revert "TEMPORARY: change 3 files to trigger doc failures" This reverts commit cc36481d9682f46792203662db610806d6593d89. --- .github/workflows/code-health.yml | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index 5d70673594..bd20a1cef1 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -60,7 +60,30 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - name: Run ShellCheck uses: bewuethr/shellcheck-action@d01912909579c4b1a335828b8fca197fbb8e0aa4 + generate-doc-check: + runs-on: ubuntu-latest + permissions: {} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - run: make tools # all resources with auto-generated doc must be specified below here + - name: Doc for control_plane_ip_addresses + run: export resource_name=control_plane_ip_addresses && make generate-doc + - name: Doc for push_based_log_export + run: export resource_name=push_based_log_export && make generate-doc + - name: Doc for search_deployment + run: export 
resource_name=search_deployment && make generate-doc + - name: Find mutations + id: self_mutation + run: |- + git add . + git diff --staged --patch --exit-code > .repo.patch || echo "self_mutation_happened=true" >> "${GITHUB_OUTPUT}" + - name: Fail build on mutation + if: steps.self_mutation.outputs.self_mutation_happened + run: |- + echo "::error::Files were changed during build (see build log). If this was triggered from a fork, you will need to update your branch." + cat .repo.patch + exit 1 call-acceptance-tests-workflow: - needs: [build, lint, shellcheck, unit-test] + needs: [build, lint, shellcheck, unit-test, generate-doc-check] secrets: inherit uses: ./.github/workflows/acceptance-tests.yml From 9d5c3ee74c21ee1bd1dea09f3531e07ee613d241 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 18 Jul 2024 10:23:26 +0200 Subject: [PATCH 52/84] chore: Enables GitHub Action linter errors in GitHub (#2440) * TEMPORARY: make action linter fail * problem matcher * Revert "TEMPORARY: make action linter fail" This reverts commit 2ea3cd5fee4836f9275f59d5daaf72213e78aabe. 
--- .github/actionlint-matcher.json | 18 ++++++++++++++++++ .github/workflows/code-health.yml | 5 ++++- 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 .github/actionlint-matcher.json diff --git a/.github/actionlint-matcher.json b/.github/actionlint-matcher.json new file mode 100644 index 0000000000..dcb9d323a2 --- /dev/null +++ b/.github/actionlint-matcher.json @@ -0,0 +1,18 @@ +{ + "problemMatcher": [ + { + "owner": "actionlint", + "severity": "warning", + "pattern": [ + { + "regexp": "^(?:\\x1b\\[\\d+m)?(.+?)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*: (?:\\x1b\\[\\d+m)*(.+?)(?:\\x1b\\[\\d+m)* \\[(.+?)\\]$", + "file": 1, + "line": 2, + "column": 3, + "message": 4, + "code": 5 + } + ] + } + ] +} \ No newline at end of file diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index bd20a1cef1..78b83ec824 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -51,7 +51,10 @@ jobs: with: version: v1.59.0 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version - name: actionlint - run: make tools && actionlint -verbose -color + run: | + make tools + echo "::add-matcher::.github/actionlint-matcher.json" + actionlint -color shell: bash shellcheck: runs-on: ubuntu-latest From b4b9109e277fb547315c0fea42131c426d4fb788 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 18 Jul 2024 10:24:05 +0200 Subject: [PATCH 53/84] update version (#2439) --- .github/workflows/code-health.yml | 2 +- GNUmakefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index 78b83ec824..0be3c9f1f1 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -49,7 +49,7 @@ jobs: - name: golangci-lint uses: 
golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 with: - version: v1.59.0 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version + version: v1.59.1 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version - name: actionlint run: | make tools diff --git a/GNUmakefile b/GNUmakefile index 7505b4ac43..1e827d9b77 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -18,7 +18,7 @@ GITTAG=$(shell git describe --always --tags) VERSION=$(GITTAG:v%=%) LINKER_FLAGS=-s -w -X 'github.com/mongodb/terraform-provider-mongodbatlas/version.ProviderVersion=${VERSION}' -GOLANGCI_VERSION=v1.59.0 # Also update golangci-lint GH action in code-health.yml when updating this version +GOLANGCI_VERSION=v1.59.1 # Also update golangci-lint GH action in code-health.yml when updating this version export PATH := $(shell go env GOPATH)/bin:$(PATH) export SHELL := env PATH=$(PATH) /bin/bash From 5cad7be23d801328954ce8dea84a903fc2cdced4 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 18 Jul 2024 11:00:00 +0200 Subject: [PATCH 54/84] doc: Updates examples & docs that use replicaSet clusters (#2428) * update basic examples * fix linter * fix tf-validate * update tflint version * fix validate * remove tf linter exceptions * make linter fail * simplify and show linter errors in GH * tlint problem matcher * problem matcher * minimum severity warning * fix linter * make tf-validate logic easier to be run in local * less verbose tf init * fix /mongodbatlas_network_peering/aws * doc for backup_compliance_policy * fix container_id reference * fix mongodbatlas_network_peering/azure * use temp fodler * fix examples/mongodbatlas_network_peering/gcp * remaining examples * fix mongodbatlas_clusters * fix adv_cluster doc * remaining doc changes * fix typo * fix examples with deprecated arguments * get the first value for containter_id * container_id in doc * address feedback --- 
.github/tflint-matcher.json | 19 ++ .github/workflows/examples.yml | 33 +--- .gitignore | 1 + GNUmakefile | 6 +- docs/data-sources/advanced_clusters.md | 10 +- docs/data-sources/backup_compliance_policy.md | 30 ++-- docs/data-sources/cloud_backup_schedule.md | 30 ++-- .../cloud_provider_snapshot_backup_policy.md | 40 +++-- docs/data-sources/data_lake_pipeline.md | 27 +-- docs/data-sources/data_lake_pipeline_run.md | 2 +- docs/data-sources/data_lake_pipeline_runs.md | 2 +- docs/data-sources/ldap_verify.md | 28 +-- docs/resources/advanced_cluster.md | 8 +- docs/resources/backup_compliance_policy.md | 35 ++-- docs/resources/cloud_backup_schedule.md | 124 ++++++++----- docs/resources/cloud_backup_snapshot.md | 56 +++--- .../cloud_backup_snapshot_restore_job.md | 146 ++++++++------- docs/resources/cloud_provider_snapshot.md | 58 +++--- .../cloud_provider_snapshot_backup_policy.md | 124 +++++++------ .../cloud_provider_snapshot_restore_job.md | 118 +++++++------ docs/resources/data_lake_pipeline.md | 27 +-- docs/resources/encryption_at_rest.md | 31 ++-- docs/resources/ldap_verify.md | 28 +-- docs/resources/network_peering.md | 167 ++++++++---------- .../versions.tf | 2 +- .../create-and-assign-pak/versions.tf | 2 +- .../versions.tf | 2 +- .../main.tf | 30 ++-- .../point-in-time/main.tf | 38 ++-- .../atlas_cluster.tf | 34 ++-- examples/mongodbatlas_database_user/main.tf | 2 +- .../aws/atlas-cluster/README.md | 8 +- .../aws/atlas-cluster/main.tf | 19 +- .../azure/atlas.tf | 31 ++-- .../azure/outputs.tf | 2 +- .../mongodbatlas_network_peering/aws/main.tf | 34 ++-- .../azure/atlas.tf | 35 ++-- .../azure/variables.tf | 3 - .../gcp/cluster.tf | 52 +++--- examples/mongodbatlas_online_archive/main.tf | 11 +- .../aws/cluster/README.md | 4 +- .../aws/cluster/atlas-cluster.tf | 32 ++-- .../aws/cluster/output.tf | 2 +- .../azure/main.tf | 12 +- .../azure/main.tf | 12 +- .../versions.tf | 4 +- .../third-party-integration.tf | 1 - examples/starter/Readme.md | 4 +- 
examples/starter/atlas_cluster.tf | 33 ++-- examples/starter/variables.tf | 4 - scripts/tf-validate.sh | 37 ++-- scripts/tflint.sh | 34 ---- 52 files changed, 876 insertions(+), 758 deletions(-) create mode 100644 .github/tflint-matcher.json delete mode 100755 scripts/tflint.sh diff --git a/.github/tflint-matcher.json b/.github/tflint-matcher.json new file mode 100644 index 0000000000..888ff582cb --- /dev/null +++ b/.github/tflint-matcher.json @@ -0,0 +1,19 @@ +{ + "problemMatcher": [ + { + "owner": "tflint-compact", + "severity": "warning", + "pattern": [ + { + "regexp": "^(.+):(\\d+):(\\d+):\\s(Error|Warning|Notice)\\s-\\s(.+)\\s\\((.+)\\)$", + "file": 1, + "line": 2, + "column": 3, + "severity": 4, + "message": 5, + "code": 6 + } + ] + } + ] +} \ No newline at end of file diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 93d8b69b48..daa8791a50 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -15,52 +15,27 @@ env: jobs: tf-validate: runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - terraform_version: ["${{vars.TF_VERSION_LATEST}}"] permissions: {} steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - with: - fetch-depth: 0 - - run: echo "GO_VERSION=$(cat .go-version)" >> "${GITHUB_ENV}" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - name: go build - run: go build -o terraform-plugin-dir/terraform-provider-mongodbatlas-dev_v99.99.99_x5 . 
- - name: override plugin - run: | - # For newer versions - mkdir -p ~/.terraform.d/plugins/registry.terraform.io/hashicorp/mongodbatlas-dev/99.99.99/"$(go env GOOS)"_"$(go env GOARCH)"/ - cp terraform-plugin-dir/terraform-provider-mongodbatlas-dev_v99.99.99_x5 ~/.terraform.d/plugins/registry.terraform.io/hashicorp/mongodbatlas-dev/99.99.99/"$(go env GOOS)"_"$(go env GOARCH)"/ - - name: replace names of provider for local development - run: grep --include=\*versions.tf -rnl './examples' -e 'source = "mongodb/mongodbatlas"' | xargs sed -i s@mongodb/mongodbatlas@hashicorp/mongodbatlas-dev@g - - name: remove version of provider for local development - run: grep --include=\*versions.tf -rnl './examples' -e 'version =' | xargs sed -i '/^\s*version =/d' - - name: Fix format after substitutions - run: grep --include=\*versions.tf -rnl './examples' -e 'source[[:space:]]\+=' | xargs sed -i 's@\(\([[:space:]]*\)source\)[[:space:]]\+=[[:space:]]*@\2source = @g' - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 with: - terraform_version: ${{ matrix.terraform_version }} - # Needed to use the output of `terraform validate -json` + terraform_version: ${{ vars.TF_VERSION_LATEST }} terraform_wrapper: false - name: tf-validate run: make tools tf-validate tflint: runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - terraform_version: ["${{vars.TF_VERSION_LATEST}}"] permissions: {} steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 - with: - fetch-depth: 0 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: tflint - run: make tools tflint + run: | + echo "::add-matcher::.github/tflint-matcher.json" + make tools tflint diff --git a/.gitignore b/.gitignore index d359912656..6366b3c11a 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ terraform.tfplan terraform.tfstate .terraform.lock.hcl bin/ +bin-examples/ /pkg/ .vagrant/ *.backup diff --git a/GNUmakefile 
b/GNUmakefile index 1e827d9b77..488873875d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -75,7 +75,7 @@ lint: tools: ## Install dev tools @echo "==> Installing dependencies..." go install github.com/icholy/gomajor@latest - go install github.com/terraform-linters/tflint@v0.49.0 + go install github.com/terraform-linters/tflint@v0.52.0 go install github.com/rhysd/actionlint/cmd/actionlint@latest go install golang.org/x/tools/go/analysis/passes/fieldalignment/cmd/fieldalignment@latest go install github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs@latest @@ -91,11 +91,11 @@ docs: .PHONY: tflint tflint: fmtcheck - @scripts/tflint.sh + tflint -f compact --recursive --minimum-failure-severity=warning .PHONY: tf-validate tf-validate: fmtcheck - @scripts/tf-validate.sh + scripts/tf-validate.sh .PHONY: link-git-hooks link-git-hooks: ## Install git hooks diff --git a/docs/data-sources/advanced_clusters.md b/docs/data-sources/advanced_clusters.md index 31c1c84479..f6bb290e63 100644 --- a/docs/data-sources/advanced_clusters.md +++ b/docs/data-sources/advanced_clusters.md @@ -1,6 +1,6 @@ -# Data Source: mongodbatlas_clusters +# Data Source: mongodbatlas_advanced_clusters -`mongodbatlas_cluster` describes all Advanced Clusters by the provided project_id. The data source requires your Project ID. +`mongodbatlas_advanced_clusters` describes all Advanced Clusters by the provided project_id. The data source requires your Project ID. -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. 
@@ -11,7 +11,7 @@ ## Example Usage ```terraform -resource "mongodbatlas_cluster" "example" { +resource "mongodbatlas_advanced_cluster" "example" { project_id = "" name = "cluster-test" cluster_type = "REPLICASET" @@ -29,8 +29,8 @@ resource "mongodbatlas_cluster" "example" { } } -data "mongodbatlas_clusters" "example" { - project_id = mongodbatlas_cluster.example.project_id +data "mongodbatlas_advanced_clusters" "example" { + project_id = mongodbatlas_advanced_cluster.example.project_id } ``` diff --git a/docs/data-sources/backup_compliance_policy.md b/docs/data-sources/backup_compliance_policy.md index abcc0a1687..411ce1d300 100644 --- a/docs/data-sources/backup_compliance_policy.md +++ b/docs/data-sources/backup_compliance_policy.md @@ -9,20 +9,28 @@ ## Example Usage ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 diff --git a/docs/data-sources/cloud_backup_schedule.md b/docs/data-sources/cloud_backup_schedule.md index a3bca33bc8..1aa383656a 100644 --- a/docs/data-sources/cloud_backup_schedule.md +++ 
b/docs/data-sources/cloud_backup_schedule.md @@ -7,20 +7,28 @@ ## Example Usage ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 diff --git a/docs/data-sources/cloud_provider_snapshot_backup_policy.md b/docs/data-sources/cloud_provider_snapshot_backup_policy.md index d06361850a..09724c9aa8 100644 --- a/docs/data-sources/cloud_provider_snapshot_backup_policy.md +++ b/docs/data-sources/cloud_provider_snapshot_backup_policy.md @@ -14,20 +14,28 @@ subcategory: "Deprecated" ## Example Usage ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + 
provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -35,31 +43,31 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { policies { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id frequency_interval = 1 frequency_type = "hourly" retention_unit = "days" retention_value = 1 } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id frequency_interval = 1 frequency_type = "daily" retention_unit = "days" retention_value = 2 } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id frequency_interval = 4 frequency_type = "weekly" retention_unit = "weeks" retention_value = 3 } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id frequency_interval = 5 frequency_type = "monthly" retention_unit = "months" diff --git 
a/docs/data-sources/data_lake_pipeline.md b/docs/data-sources/data_lake_pipeline.md index 3e4d8aa410..fbceb31afa 100644 --- a/docs/data-sources/data_lake_pipeline.md +++ b/docs/data-sources/data_lake_pipeline.md @@ -14,16 +14,23 @@ resource "mongodbatlas_project" "projectTest" { } resource "mongodbatlas_advanced_cluster" "automated_backup_test" { - project_id = "63f4d4a47baeac59406dc131" - name = "automated-backup-test" - - provider_name = "GCP" - provider_region_name = "US_EAST_4" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots - mongo_db_major_version = "7.0" + project_id = var.project_id + name = "automated-backup-test" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "GCP" + region_name = "US_EAST_4" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } } - +} resource "mongodbatlas_data_lake_pipeline" "pipeline" { project_id = mongodbatlas_project.projectTest.project_id @@ -38,7 +45,7 @@ resource "mongodbatlas_data_lake_pipeline" "pipeline" { source { type = "ON_DEMAND_CPS" - cluster_name = mongodbatlas_cluster.automated_backup_test.name + cluster_name = mongodbatlas_advanced_cluster.automated_backup_test.name database_name = "sample_airbnb" collection_name = "listingsAndReviews" } diff --git a/docs/data-sources/data_lake_pipeline_run.md b/docs/data-sources/data_lake_pipeline_run.md index 29bf7389c4..fc7ba90cdb 100644 --- a/docs/data-sources/data_lake_pipeline_run.md +++ b/docs/data-sources/data_lake_pipeline_run.md @@ -21,7 +21,7 @@ resource "mongodbatlas_data_lake_pipeline" "pipeline" { } source { type = "ON_DEMAND_CPS" - cluster_name = mongodbatlas_cluster.clusterTest.name + cluster_name = mongodbatlas_advanced_cluster.clusterTest.name database_name = "sample_airbnb" collection_name = "listingsAndReviews" } diff --git a/docs/data-sources/data_lake_pipeline_runs.md 
b/docs/data-sources/data_lake_pipeline_runs.md index 4e44f5459c..8132b939a6 100644 --- a/docs/data-sources/data_lake_pipeline_runs.md +++ b/docs/data-sources/data_lake_pipeline_runs.md @@ -21,7 +21,7 @@ resource "mongodbatlas_data_lake_pipeline" "pipeline" { } source { type = "ON_DEMAND_CPS" - cluster_name = mongodbatlas_cluster.clusterTest.name + cluster_name = mongodbatlas_advanced_cluster.clusterTest.name database_name = "sample_airbnb" collection_name = "listingsAndReviews" } diff --git a/docs/data-sources/ldap_verify.md b/docs/data-sources/ldap_verify.md index 68a4e66fbe..3a4ec5c3f3 100644 --- a/docs/data-sources/ldap_verify.md +++ b/docs/data-sources/ldap_verify.md @@ -13,15 +13,23 @@ resource "mongodbatlas_project" "test" { org_id = "ORG ID" } -resource "mongodbatlas_cluster" "test" { - project_id = mongodbatlas_project.test.id - name = "NAME OF THE CLUSTER" - - // Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_EAST_2" - provider_instance_size_name = "M10" - cloud_backup = true //enable cloud provider snapshots +resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.test.id + name = "ClusterName" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud provider snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_ldap_verify" "test" { @@ -30,7 +38,7 @@ resource "mongodbatlas_ldap_verify" "test" { port = 636 bind_username = "USERNAME" bind_password = "PASSWORD" - depends_on = [mongodbatlas_cluster.test] + depends_on = [mongodbatlas_advanced_cluster.test] } data "mongodbatlas_ldap_verify" "test" { diff --git a/docs/resources/advanced_cluster.md b/docs/resources/advanced_cluster.md index ef93549c0f..e4eff9132a 100644 --- a/docs/resources/advanced_cluster.md +++ b/docs/resources/advanced_cluster.md @@ -303,21 +303,21 @@ 
resource "mongodbatlas_advanced_cluster" "cluster" { Standard ```terraform output "standard" { - value = mongodbatlas_cluster.cluster-test.connection_strings[0].standard + value = mongodbatlas_advanced_cluster.cluster.connection_strings[0].standard } # Example return string: standard = "mongodb://cluster-atlas-shard-00-00.ygo1m.mongodb.net:27017,cluster-atlas-shard-00-01.ygo1m.mongodb.net:27017,cluster-atlas-shard-00-02.ygo1m.mongodb.net:27017/?ssl=true&authSource=admin&replicaSet=atlas-12diht-shard-0" ``` Standard srv ```terraform output "standard_srv" { - value = mongodbatlas_cluster.cluster-test.connection_strings[0].standard_srv + value = mongodbatlas_advanced_cluster.cluster.connection_strings[0].standard_srv } # Example return string: standard_srv = "mongodb+srv://cluster-atlas.ygo1m.mongodb.net" ``` Private with Network peering and Custom DNS AWS enabled ```terraform output "private" { - value = mongodbatlas_cluster.cluster-test.connection_strings[0].private + value = mongodbatlas_advanced_cluster.cluster.connection_strings[0].private } # Example return string: private = "mongodb://cluster-atlas-shard-00-00-pri.ygo1m.mongodb.net:27017,cluster-atlas-shard-00-01-pri.ygo1m.mongodb.net:27017,cluster-atlas-shard-00-02-pri.ygo1m.mongodb.net:27017/?ssl=true&authSource=admin&replicaSet=atlas-12diht-shard-0" private = "mongodb+srv://cluster-atlas-pri.ygo1m.mongodb.net" @@ -325,7 +325,7 @@ private = "mongodb+srv://cluster-atlas-pri.ygo1m.mongodb.net" Private srv with Network peering and Custom DNS AWS enabled ```terraform output "private_srv" { - value = mongodbatlas_cluster.cluster-test.connection_strings[0].private_srv + value = mongodbatlas_advanced_cluster.cluster.connection_strings[0].private_srv } # Example return string: private_srv = "mongodb+srv://cluster-atlas-pri.ygo1m.mongodb.net" ``` diff --git a/docs/resources/backup_compliance_policy.md b/docs/resources/backup_compliance_policy.md index 47454a054e..cc2320df87 100644 --- 
a/docs/resources/backup_compliance_policy.md +++ b/docs/resources/backup_compliance_policy.md @@ -18,8 +18,8 @@ We first suggest disabling `mongodbatlas_backup_compliance_policy` resource, whi * For example, replace: ``` resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name ... } ``` @@ -37,21 +37,28 @@ We first suggest disabling `mongodbatlas_backup_compliance_policy` resource, whi ## Example Usage ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = var.region + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 diff --git a/docs/resources/cloud_backup_schedule.md b/docs/resources/cloud_backup_schedule.md index ab064c2968..4643b4c524 100644 --- a/docs/resources/cloud_backup_schedule.md +++ b/docs/resources/cloud_backup_schedule.md @@ -6,7 +6,7 @@ -> **NOTE:** If Backup Compliance Policy is enabled for the project for which this backup schedule 
is defined, you cannot modify the backup schedule for an individual cluster below the minimum requirements set in the Backup Compliance Policy. See [Backup Compliance Policy Prohibited Actions and Considerations](https://www.mongodb.com/docs/atlas/backup/cloud-backup/backup-compliance-policy/#configure-a-backup-compliance-policy). --> **NOTE:** When creating a backup schedule you **must either** use the `depends_on` clause to indicate the cluster to which it refers **or** specify the values of `project_id` and `cluster_name` as reference of the cluster resource (e.g. `cluster_name = mongodbatlas_cluster.my_cluster.name` - see the example below). Failure in doing so will result in an error when executing the plan. +-> **NOTE:** When creating a backup schedule you **must either** use the `depends_on` clause to indicate the cluster to which it refers **or** specify the values of `project_id` and `cluster_name` as reference of the cluster resource (e.g. `cluster_name = mongodbatlas_advanced_cluster.my_cluster.name` - see the example below). Failure in doing so will result in an error when executing the plan. In the Terraform MongoDB Atlas Provider 1.0.0 we have re-architected the way in which Cloud Backup Policies are manged with Terraform to significantly reduce the complexity. Due to this change we've provided multiple examples below to help express how this new resource functions. @@ -16,20 +16,28 @@ In the Terraform MongoDB Atlas Provider 1.0.0 we have re-architected the way in You can create a new cluster with `cloud_backup` enabled and then immediately overwrite the default cloud backup policy that Atlas creates by default at the same time with this example. 
```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_backup_schedule resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_backup_schedule resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -55,20 +63,28 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { You can enable `cloud_backup` in the Cluster resource and then use the `cloud_backup_schedule` resource with no policy items to remove the default policy that Atlas creates when you enable Cloud Backup. This allows you to then create a policy when you are ready to via Terraform. 
```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_backup_schedule resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_backup_schedule resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -83,20 +99,28 @@ If you followed the example to Create a Cluster with Cloud Backup Enabled but No The cluster already exists with `cloud_backup` enabled ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_backup_schedule resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_backup_schedule resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource 
"mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -138,20 +162,28 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { You can enable `cloud_backup` in the Cluster resource and then use the `cloud_backup_schedule` resource with a basic policy for Cloud Backup. ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_EAST_2" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_backup_schedule resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "clusterTest" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_backup_schedule resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_schedule" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -172,7 +204,7 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { "YEARLY", "ON_DEMAND"] region_name = "US_EAST_1" - replication_spec_id = mongodbatlas_cluster.my_cluster.replication_specs.*.id[0] + replication_spec_id = mongodbatlas_advanced_cluster.my_cluster.replication_specs.*.id[0] should_copy_oplogs = false } diff --git 
a/docs/resources/cloud_backup_snapshot.md b/docs/resources/cloud_backup_snapshot.md index 1563886195..ef67fe7ea9 100644 --- a/docs/resources/cloud_backup_snapshot.md +++ b/docs/resources/cloud_backup_snapshot.md @@ -10,32 +10,40 @@ On-demand snapshots happen immediately, unlike scheduled snapshots which occur a ## Example Usage ```terraform - resource "mongodbatlas_cluster" "my_cluster" { - project_id = "5cf5a45a9ccf6400e60981b6" - name = "MyCluster" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots - } - - resource "mongodbatlas_cloud_backup_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name - description = "myDescription" - retention_in_days = 1 - } - - resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_backup_snapshot.test.project_id - cluster_name = mongodbatlas_cloud_backup_snapshot.test.cluster_name - snapshot_id = mongodbatlas_cloud_backup_snapshot.test.snapshot_id - delivery_type_config { - download = true +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } +} + +resource "mongodbatlas_cloud_backup_snapshot" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + description = "myDescription" + retention_in_days = 1 +} + +resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { + project_id = mongodbatlas_cloud_backup_snapshot.test.project_id + cluster_name = 
mongodbatlas_cloud_backup_snapshot.test.cluster_name + snapshot_id = mongodbatlas_cloud_backup_snapshot.test.snapshot_id + delivery_type_config { + download = true + } +} ``` ## Argument Reference diff --git a/docs/resources/cloud_backup_snapshot_restore_job.md b/docs/resources/cloud_backup_snapshot_restore_job.md index 2f4d5e0b6e..3107922d51 100644 --- a/docs/resources/cloud_backup_snapshot_restore_job.md +++ b/docs/resources/cloud_backup_snapshot_restore_job.md @@ -19,85 +19,107 @@ ### Example automated delivery type ```terraform - resource "mongodbatlas_cluster" "my_cluster" { - project_id = "5cf5a45a9ccf6400e60981b6" - name = "MyCluster" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } } +} - resource "mongodbatlas_cloud_provider_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name - description = "myDescription" - retention_in_days = 1 - } +resource "mongodbatlas_cloud_provider_snapshot" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + description = "myDescription" + retention_in_days = 1 +} - resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_provider_snapshot.test.project_id - cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name - snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id - delivery_type_config { - automated = 
true - target_cluster_name = "MyCluster" - target_project_id = "5cf5a45a9ccf6400e60981b6" - } +resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { + project_id = mongodbatlas_cloud_provider_snapshot.test.project_id + cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name + snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id + delivery_type_config { + automated = true + target_cluster_name = "MyCluster" + target_project_id = "5cf5a45a9ccf6400e60981b6" } +} ``` ### Example download delivery type ```terraform - resource "mongodbatlas_cluster" "my_cluster" { - project_id = "5cf5a45a9ccf6400e60981b6" - name = "MyCluster" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } } +} - resource "mongodbatlas_cloud_provider_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name - description = "myDescription" - retention_in_days = 1 - } - - resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_provider_snapshot.test.project_id - cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name - snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id - delivery_type_config { - download = true - } +resource "mongodbatlas_cloud_provider_snapshot" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + description = 
"myDescription" + retention_in_days = 1 +} + +resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { + project_id = mongodbatlas_cloud_provider_snapshot.test.project_id + cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name + snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id + delivery_type_config { + download = true } +} ``` ### Example of a point in time restore ``` -resource "mongodbatlas_cluster" "cluster_test" { - project_id = mongodbatlas_project.project_test.id - name = var.cluster_name - - # Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_EAST_1" - provider_instance_size_name = "M10" - cloud_backup = true # enable cloud provider snapshots - pit_enabled = true +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } - resource "mongodbatlas_cloud_backup_snapshot" "test" { - project_id = mongodbatlas_cluster.cluster_test.project_id - cluster_name = mongodbatlas_cluster.cluster_test.name + project_id = mongodbatlas_advanced_cluster.cluster_test.project_id + cluster_name = mongodbatlas_advanced_cluster.cluster_test.name description = "My description" retention_in_days = "1" } @@ -110,8 +132,8 @@ resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { delivery_type_config { point_in_time = true - target_cluster_name = mongodbatlas_cluster.cluster_test.name - target_project_id = mongodbatlas_cluster.cluster_test.project_id + target_cluster_name = mongodbatlas_advanced_cluster.cluster_test.name + target_project_id = mongodbatlas_advanced_cluster.cluster_test.project_id point_in_time_utc_seconds = var.point_in_time_utc_seconds } } diff --git 
a/docs/resources/cloud_provider_snapshot.md b/docs/resources/cloud_provider_snapshot.md index 6d7f756632..df20876be0 100644 --- a/docs/resources/cloud_provider_snapshot.md +++ b/docs/resources/cloud_provider_snapshot.md @@ -15,33 +15,41 @@ On-demand snapshots happen immediately, unlike scheduled snapshots which occur a ## Example Usage ```terraform - resource "mongodbatlas_cluster" "my_cluster" { - project_id = "5cf5a45a9ccf6400e60981b6" - name = "MyCluster" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots - } - - resource "mongodbatlas_cloud_provider_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name - description = "myDescription" - retention_in_days = 1 - timeout = "10m" - } - - resource "mongodbatlas_cloud_provider_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_provider_snapshot.test.project_id - cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name - snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id - delivery_type_config { - download = true +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } +} + +resource "mongodbatlas_cloud_provider_snapshot" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + description = "myDescription" + retention_in_days = 1 + timeout = "10m" +} + +resource "mongodbatlas_cloud_provider_snapshot_restore_job" "test" { + project_id = 
mongodbatlas_cloud_provider_snapshot.test.project_id + cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name + snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id + delivery_type_config { + download = true + } +} ``` ## Argument Reference diff --git a/docs/resources/cloud_provider_snapshot_backup_policy.md b/docs/resources/cloud_provider_snapshot_backup_policy.md index 91c057cb2a..f28a17a553 100644 --- a/docs/resources/cloud_provider_snapshot_backup_policy.md +++ b/docs/resources/cloud_provider_snapshot_backup_policy.md @@ -17,20 +17,28 @@ When Cloud Backup is enabled for a cluster MongoDB Atlas automatically creates a ## Example Usage - Create a Cluster and Modify the 4 Default Policies Simultaneously ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_provider_snapshot_backup_policy resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_provider_snapshot_backup_policy resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -39,10 +47,10 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { //Keep all 4 default policies but 
modify the units and values //Could also just reflect the policy defaults here for later management policies { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id frequency_interval = 1 frequency_type = "hourly" retention_unit = "days" @@ -50,7 +58,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id frequency_interval = 1 frequency_type = "daily" retention_unit = "days" @@ -58,7 +66,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id frequency_interval = 4 frequency_type = "weekly" retention_unit = "weeks" @@ -66,7 +74,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id frequency_interval = 5 frequency_type = "monthly" retention_unit = "months" @@ -81,20 +89,28 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { ## Example Usage - Create a Cluster and Modify 3 Default Policies and Remove 1 Default Policy Simultaneously ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings 
"block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_provider_snapshot_backup_policy resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_provider_snapshot_backup_policy resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -102,10 +118,10 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { policies { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id frequency_interval = 1 frequency_type = "hourly" retention_unit = "days" @@ -113,7 +129,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id frequency_interval = 1 frequency_type = "daily" retention_unit = "days" @@ -122,7 +138,7 @@ resource 
"mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { # Item removed # policy_item { - # id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id + # id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id # frequency_interval = 4 # frequency_type = "weekly" # retention_unit = "weeks" @@ -130,7 +146,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { # } policy_item { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id frequency_interval = 5 frequency_type = "monthly" retention_unit = "months" @@ -147,20 +163,28 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { ## Example Usage - Remove 3 Default Policies Items After the Cluster Has Already Been Created and Modify One Policy ```terraform -resource "mongodbatlas_cluster" "my_cluster" { - project_id = "" - name = "clusterTest" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_CENTRAL_1" - provider_instance_size_name = "M10" - cloud_backup = true // must be enabled in order to use cloud_provider_snapshot_backup_policy resource +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # must be enabled in order to use cloud_provider_snapshot_backup_policy resource + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_CENTRAL_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = 
mongodbatlas_advanced_cluster.my_cluster.name reference_hour_of_day = 3 reference_minute_of_hour = 45 @@ -168,11 +192,11 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { policies { - id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id + id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.id # Item removed # policy_item { - # id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id + # id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.0.id # frequency_interval = 1 # frequency_type = "hourly" # retention_unit = "days" @@ -181,7 +205,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { # Item removed # policy_item { - # id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id + # id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.1.id # frequency_interval = 1 # frequency_type = "daily" # retention_unit = "days" @@ -190,7 +214,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { # Item removed # policy_item { - # id = mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id + # id = mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.2.id # frequency_interval = 4 # frequency_type = "weekly" # retention_unit = "weeks" @@ -208,7 +232,7 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { } ``` --> **NOTE:** In this example we decided to remove the first 3 items so we can't use `mongodbatlas_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id` to retrieve the monthly id value of the cluster state due to once the cluster being modified or makes a `terraform refresh` will cause that the three items will remove from the state, so we will get an error due to the index 3 doesn't exists any more and 
our monthly policy item is moved to the first place of the array. So we use `5f0747cad187d8609a72f546`, which is an example of an id MongoDB Atlas returns for the policy item we want to keep. Here it is hard coded because you need to either use the actual value from the Terraform state or look to map the policy item you want to keep to it's current placement in the state file array. +-> **NOTE:** In this example we decided to remove the first 3 items, so we can't use `mongodbatlas_advanced_cluster.my_cluster.snapshot_backup_policy.0.policies.0.policy_item.3.id` to retrieve the monthly id value from the cluster state: once the cluster is modified or a `terraform refresh` is performed, the three items are removed from the state, so we would get an error because index 3 doesn't exist any more and our monthly policy item is moved to the first place of the array. So we use `5f0747cad187d8609a72f546`, which is an example of an id MongoDB Atlas returns for the policy item we want to keep. Here it is hard coded because you need to either use the actual value from the Terraform state or look to map the policy item you want to keep to its current placement in the state file array. ## Argument Reference @@ -221,11 +245,11 @@ resource "mongodbatlas_cloud_provider_snapshot_backup_policy" "test" { ### Policies * `policies` - (Required) Contains a document for each backup policy item in the desired updated backup policy. -* `policies.#.id` - (Required) Unique identifier of the backup policy that you want to update. policies.#.id is a value obtained via the mongodbatlas_cluster resource. `cloud_backup` of the mongodbatlas_cluster resource must be set to true. See the example above for how to refer to the mongodbatlas_cluster resource for policies.#.id +* `policies.#.id` - (Required) Unique identifier of the backup policy that you want to update. policies.#.id is a value obtained via the mongodbatlas_advanced_cluster resource. 
`cloud_backup` of the mongodbatlas_advanced_cluster resource must be set to true. See the example above for how to refer to the mongodbatlas_advanced_cluster resource for policies.#.id #### Policy Item * `policies.#.policy_item` - (Required) Array of backup policy items. -* `policies.#.policy_item.#.id` - (Required) Unique identifier of the backup policy item. `policies.#.policy_item.#.id` is a value obtained via the mongodbatlas_cluster resource. `cloud_backup` of the mongodbatlas_cluster resource must be set to true. See the example above for how to refer to the mongodbatlas_cluster resource for policies.#.policy_item.#.id +* `policies.#.policy_item.#.id` - (Required) Unique identifier of the backup policy item. `policies.#.policy_item.#.id` is a value obtained via the mongodbatlas_advanced_cluster resource. `cloud_backup` of the mongodbatlas_advanced_cluster resource must be set to true. See the example above for how to refer to the mongodbatlas_advanced_cluster resource for policies.#.policy_item.#.id * `policies.#.policy_item.#.frequency_interval` - (Required) Desired frequency of the new backup policy item specified by frequencyType. * `policies.#.policy_item.#.frequency_type` - (Required) Frequency associated with the backup policy item. One of the following values: hourly, daily, weekly or monthly. * `policies.#.policy_item.#.retention_unit` - (Required) Scope of the backup policy item: days, weeks, or months. diff --git a/docs/resources/cloud_provider_snapshot_restore_job.md b/docs/resources/cloud_provider_snapshot_restore_job.md index 5df4d1b533..00d0f36875 100644 --- a/docs/resources/cloud_provider_snapshot_restore_job.md +++ b/docs/resources/cloud_provider_snapshot_restore_job.md @@ -23,66 +23,82 @@ subcategory: "Deprecated" ### Example automated delivery type. 
```terraform - resource "mongodbatlas_cluster" "my_cluster" { - project_id = "5cf5a45a9ccf6400e60981b6" - name = "MyCluster" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots - } - - resource "mongodbatlas_cloud_provider_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name - description = "myDescription" - retention_in_days = 1 - } - - resource "mongodbatlas_cloud_provider_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_provider_snapshot.test.project_id - cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name - snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id - delivery_type_config { - automated = true - target_cluster_name = "MyCluster" - target_project_id = "5cf5a45a9ccf6400e60981b6" +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } - depends_on = [mongodbatlas_cloud_provider_snapshot.test] } +} + +resource "mongodbatlas_cloud_provider_snapshot" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + description = "myDescription" + retention_in_days = 1 +} + +resource "mongodbatlas_cloud_provider_snapshot_restore_job" "test" { + project_id = mongodbatlas_cloud_provider_snapshot.test.project_id + cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name + snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id + delivery_type_config { + automated = true + target_cluster_name = "MyCluster" + 
target_project_id = "5cf5a45a9ccf6400e60981b6" + } + depends_on = [mongodbatlas_cloud_provider_snapshot.test] +} ``` ### Example download delivery type. ```terraform - resource "mongodbatlas_cluster" "my_cluster" { - project_id = "5cf5a45a9ccf6400e60981b6" - name = "MyCluster" - - //Provider Settings "block" - provider_name = "AWS" - provider_region_name = "EU_WEST_2" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots - } - - resource "mongodbatlas_cloud_provider_snapshot" "test" { - project_id = mongodbatlas_cluster.my_cluster.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name - description = "myDescription" - retention_in_days = 1 - } - - resource "mongodbatlas_cloud_provider_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_provider_snapshot.test.project_id - cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name - snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id - delivery_type_config { - download = true +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = "" + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } +} + +resource "mongodbatlas_cloud_provider_snapshot" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + description = "myDescription" + retention_in_days = 1 +} + +resource "mongodbatlas_cloud_provider_snapshot_restore_job" "test" { + project_id = mongodbatlas_cloud_provider_snapshot.test.project_id + cluster_name = mongodbatlas_cloud_provider_snapshot.test.cluster_name + snapshot_id = mongodbatlas_cloud_provider_snapshot.test.snapshot_id + delivery_type_config { + download = true + } +} ``` ## Argument Reference 
diff --git a/docs/resources/data_lake_pipeline.md b/docs/resources/data_lake_pipeline.md index a32acac567..7b94a14291 100644 --- a/docs/resources/data_lake_pipeline.md +++ b/docs/resources/data_lake_pipeline.md @@ -14,16 +14,23 @@ resource "mongodbatlas_project" "projectTest" { } resource "mongodbatlas_advanced_cluster" "automated_backup_test" { - project_id = "63f4d4a47baeac59406dc131" - name = "automated-backup-test" - - provider_name = "GCP" - provider_region_name = "US_EAST_4" - provider_instance_size_name = "M10" - cloud_backup = true // enable cloud backup snapshots - mongo_db_major_version = "7.0" + project_id = var.project_id + name = "automated-backup-test" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "GCP" + region_name = "US_EAST_4" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } } - +} resource "mongodbatlas_data_lake_pipeline" "pipeline" { project_id = mongodbatlas_project.projectTest.project_id @@ -38,7 +45,7 @@ resource "mongodbatlas_data_lake_pipeline" "pipeline" { source { type = "ON_DEMAND_CPS" - cluster_name = mongodbatlas_cluster.automated_backup_test.name + cluster_name = mongodbatlas_advanced_cluster.automated_backup_test.name database_name = "sample_airbnb" collection_name = "listingsAndReviews" } diff --git a/docs/resources/encryption_at_rest.md b/docs/resources/encryption_at_rest.md index 37e711aa56..ea85a74fa2 100644 --- a/docs/resources/encryption_at_rest.md +++ b/docs/resources/encryption_at_rest.md @@ -84,25 +84,26 @@ resource "mongodbatlas_encryption_at_rest" "example" { } } -resource "mongodbatlas_cluster" "example_cluster" { - project_id = mongodbatlas_encryption_at_rest.example.project_id - name = "CLUSTER NAME" - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "example_cluster" { + project_id = mongodbatlas_encryption_at_rest.example.project_id + name = "CLUSTER NAME" + 
cluster_type = "REPLICASET" + backup_enabled = true + encryption_at_rest_provider = "AZURE" + replication_specs { - num_shards = 1 - regions_config { - region_name = "REGION" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "REGION" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - - provider_name = "AZURE" - provider_instance_size_name = "M10" - mongo_db_major_version = "7.0" - encryption_at_rest_provider = "AZURE" } + ``` ## Argument Reference diff --git a/docs/resources/ldap_verify.md b/docs/resources/ldap_verify.md index 4bb9530b2b..681a2d0223 100644 --- a/docs/resources/ldap_verify.md +++ b/docs/resources/ldap_verify.md @@ -10,15 +10,23 @@ resource "mongodbatlas_project" "test" { org_id = "ORG ID" } -resource "mongodbatlas_cluster" "test" { - project_id = mongodbatlas_project.test.id - name = "NAME OF THE CLUSTER" - - // Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_EAST_2" - provider_instance_size_name = "M10" - cloud_backup = true //enable cloud provider snapshots +resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.test.id + name = "NAME OF THE CLUSTER" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud backup snapshots + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_ldap_verify" "test" { @@ -27,7 +35,7 @@ resource "mongodbatlas_ldap_verify" "test" { port = 636 bind_username = "USERNAME" bind_password = "PASSWORD" - depends_on = [mongodbatlas_cluster.test] + depends_on = [ mongodbatlas_advanced_cluster.test ] } ``` diff --git a/docs/resources/network_peering.md b/docs/resources/network_peering.md index b62d67e2de..57b73ca768 100644 --- a/docs/resources/network_peering.md +++ b/docs/resources/network_peering.md 
@@ -102,30 +102,25 @@ resource "google_compute_network_peering" "peering" { } # Create the cluster once the peering connection is completed -resource "mongodbatlas_cluster" "test" { - project_id = local.project_id - name = "terraform-manually-test" - num_shards = 1 - - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "test" { + project_id = local.project_id + name = "terraform-manually-test" + cluster_type = "REPLICASET" + backup_enabled = true + replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_4" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "GCP" + region_name = "US_EAST_4" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - - # Provider Settings "block" - provider_name = "GCP" - provider_instance_size_name = "M10" - depends_on = ["google_compute_network_peering.peering"] + depends_on = [ google_compute_network_peering.peering ] } # Private connection strings are not available w/ GCP until the reciprocal @@ -166,32 +161,26 @@ resource "mongodbatlas_network_peering" "test" { } # Create the cluster once the peering connection is completed -resource "mongodbatlas_cluster" "test" { - project_id = local.project_id - name = "terraform-manually-test" +resource "mongodbatlas_advanced_cluster" "test" { + project_id = local.project_id + name = "terraform-manually-test" + cluster_type = "REPLICASET" + backup_enabled = true - cluster_type = "REPLICASET" replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "US_EAST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - - # Provider Settings "block" - provider_name = 
"AZURE" - provider_disk_type_name = "P4" - provider_instance_size_name = "M10" - - depends_on = ["mongodbatlas_network_peering.test"] + depends_on = [ mongodbatlas_network_peering.test ] } - ``` ## Example Usage - Peering Connection Only, Container Exists @@ -201,27 +190,23 @@ You can create a peering connection if an appropriate container for your cloud p ```terraform # Create an Atlas cluster, this creates a container if one # does not yet exist for this AWS region -resource "mongodbatlas_cluster" "test" { - project_id = local.project_id - name = "terraform-test" - - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "test" { + project_id = local.project_id + name = "terraform-manually-test" + cluster_type = "REPLICASET" + backup_enabled = true + replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - - auto_scaling_disk_gb_enabled = false - mongo_db_major_version = "7.0" - - //Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M10" } # the following assumes an AWS provider is configured @@ -235,7 +220,7 @@ resource "aws_default_vpc" "default" { resource "mongodbatlas_network_peering" "mongo_peer" { accepter_region_name = "us-east-2" project_id = local.project_id - container_id = mongodbatlas_cluster.test.container_id + container_id = one(values(mongodbatlas_advanced_cluster.test.container_id)) provider_name = "AWS" route_table_cidr_block = "172.31.0.0/16" vpc_id = aws_default_vpc.default.id @@ -257,27 +242,23 @@ resource "aws_vpc_peering_connection_accepter" "aws_peer" { ```terraform # Create an Atlas cluster, this creates a container if one # does not yet exist for this GCP -resource "mongodbatlas_cluster" "test" { - project_id = local.project_id - name = 
"terraform-manually-test" +resource "mongodbatlas_advanced_cluster" "test" { + project_id = local.project_id + name = "terraform-manually-test" + cluster_type = "REPLICASET" + backup_enabled = true - cluster_type = "REPLICASET" replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "GCP" + region_name = "US_EAST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - - //Provider Settings "block" - provider_name = "GCP" - provider_instance_size_name = "M10" } # Create the peering connection request @@ -285,7 +266,7 @@ resource "mongodbatlas_network_peering" "test" { project_id = local.project_id atlas_cidr_block = "192.168.0.0/18" - container_id = mongodbatlas_cluster.test.container_id + container_id = one(values(mongodbatlas_advanced_cluster.test.replication_specs[0].container_id)) provider_name = "GCP" gcp_project_id = local.GCP_PROJECT_ID network_name = "default" @@ -313,33 +294,29 @@ resource "google_compute_network_peering" "peering" { # Create an Atlas cluster, this creates a container if one # does not yet exist for this AZURE region -resource "mongodbatlas_cluster" "test" { - project_id = local.project_id - name = "cluster-azure" +resource "mongodbatlas_advanced_cluster" "test" { + project_id = local.project_id + name = "cluster-azure" + cluster_type = "REPLICASET" + backup_enabled = true - cluster_type = "REPLICASET" replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "US_EAST_2" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - - auto_scaling_disk_gb_enabled = false - mongo_db_major_version = "7.0" - - //Provider Settings "block" - 
provider_name = "AZURE" - provider_instance_size_name = "M10" } # Create the peering connection request resource "mongodbatlas_network_peering" "test" { project_id = local.project_id - container_id = mongodbatlas_cluster.test.container_id + container_id = one(values(mongodbatlas_advanced_cluster.test.replication_specs[0].container_id)) provider_name = "AZURE" azure_directory_id = local.AZURE_DIRECTORY_ID azure_subscription_id = local.AZURE_SUBSCRIPTION_ID diff --git a/examples/mongodbatlas_api_key/create-and-assign-pak-together/versions.tf b/examples/mongodbatlas_api_key/create-and-assign-pak-together/versions.tf index 5a81a39da8..1888453805 100644 --- a/examples/mongodbatlas_api_key/create-and-assign-pak-together/versions.tf +++ b/examples/mongodbatlas_api_key/create-and-assign-pak-together/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { mongodbatlas = { source = "mongodb/mongodbatlas" - version = "~> 1.13.2" + version = "~> 1.0" } } required_version = ">= 1.0" diff --git a/examples/mongodbatlas_api_key/create-and-assign-pak/versions.tf b/examples/mongodbatlas_api_key/create-and-assign-pak/versions.tf index 5a81a39da8..1888453805 100644 --- a/examples/mongodbatlas_api_key/create-and-assign-pak/versions.tf +++ b/examples/mongodbatlas_api_key/create-and-assign-pak/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { mongodbatlas = { source = "mongodb/mongodbatlas" - version = "~> 1.13.2" + version = "~> 1.0" } } required_version = ">= 1.0" diff --git a/examples/mongodbatlas_api_key/create-api-key-assign-to-multiple-projects/versions.tf b/examples/mongodbatlas_api_key/create-api-key-assign-to-multiple-projects/versions.tf index 5a81a39da8..1888453805 100644 --- a/examples/mongodbatlas_api_key/create-api-key-assign-to-multiple-projects/versions.tf +++ b/examples/mongodbatlas_api_key/create-api-key-assign-to-multiple-projects/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { mongodbatlas = { source = "mongodb/mongodbatlas" - version = 
"~> 1.13.2" + version = "~> 1.0" } } required_version = ">= 1.0" diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_job/main.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_job/main.tf index ec16baddb9..952daf02b9 100644 --- a/examples/mongodbatlas_cloud_backup_snapshot_export_job/main.tf +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_job/main.tf @@ -22,20 +22,28 @@ resource "aws_s3_bucket" "test_bucket" { } } -resource "mongodbatlas_cluster" "my_cluster" { - project_id = var.project_id - name = "MyCluster" - disk_size_gb = 1 - - provider_name = "AWS" - provider_region_name = "US_EAST_1" - provider_instance_size_name = "M10" - cloud_backup = true +resource "mongodbatlas_advanced_cluster" "my_cluster" { + project_id = var.project_id + name = "MyCluster" + cluster_type = "REPLICASET" + backup_enabled = true + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } resource "mongodbatlas_cloud_backup_snapshot" "test" { project_id = var.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name description = "myDescription" retention_in_days = 1 } @@ -50,7 +58,7 @@ resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { resource "mongodbatlas_cloud_backup_snapshot_export_job" "test" { project_id = var.project_id - cluster_name = mongodbatlas_cluster.my_cluster.name + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name snapshot_id = mongodbatlas_cloud_backup_snapshot.test.snapshot_id export_bucket_id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id diff --git a/examples/mongodbatlas_cloud_provider_snapshot_restore_job/point-in-time/main.tf b/examples/mongodbatlas_cloud_provider_snapshot_restore_job/point-in-time/main.tf index 7c518367ed..e146add173 100644 --- 
a/examples/mongodbatlas_cloud_provider_snapshot_restore_job/point-in-time/main.tf +++ b/examples/mongodbatlas_cloud_provider_snapshot_restore_job/point-in-time/main.tf @@ -5,23 +5,31 @@ resource "mongodbatlas_project" "project_test" { org_id = var.org_id } -resource "mongodbatlas_cluster" "cluster_test" { - project_id = mongodbatlas_project.project_test.id - name = var.cluster_name +resource "mongodbatlas_advanced_cluster" "cluster_test" { + project_id = mongodbatlas_project.project_test.id + name = var.cluster_name + cluster_type = "REPLICASET" - # Provider Settings "block" - provider_name = "AWS" - provider_region_name = "US_EAST_1" - provider_instance_size_name = "M10" - cloud_backup = true # enable cloud provider snapshots - pit_enabled = true - retain_backups_enabled = true # keep the backup snapshopts once the cluster is deleted -} + backup_enabled = true # enable cloud provider snapshots + pit_enabled = true + retain_backups_enabled = true # keep the backup snapshots once the cluster is deleted + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } +} resource "mongodbatlas_cloud_backup_snapshot" "test" { - project_id = mongodbatlas_cluster.cluster_test.project_id - cluster_name = mongodbatlas_cluster.cluster_test.name + project_id = mongodbatlas_advanced_cluster.cluster_test.project_id + cluster_name = mongodbatlas_advanced_cluster.cluster_test.name description = "My description" retention_in_days = "1" } @@ -34,8 +42,8 @@ resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { delivery_type_config { point_in_time = true - target_cluster_name = mongodbatlas_cluster.cluster_test.name - target_project_id = mongodbatlas_cluster.cluster_test.project_id + target_cluster_name = mongodbatlas_advanced_cluster.cluster_test.name + target_project_id = mongodbatlas_advanced_cluster.cluster_test.project_id point_in_time_utc_seconds = 
var.point_in_time_utc_seconds } } diff --git a/examples/mongodbatlas_database_user/atlas_cluster.tf b/examples/mongodbatlas_database_user/atlas_cluster.tf index 75ad5ba2a0..985cc4462c 100644 --- a/examples/mongodbatlas_database_user/atlas_cluster.tf +++ b/examples/mongodbatlas_database_user/atlas_cluster.tf @@ -1,24 +1,22 @@ -resource "mongodbatlas_cluster" "cluster" { - project_id = mongodbatlas_project.project1.id - name = "MongoDB_Atlas" - mongo_db_major_version = "7.0" - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "cluster" { + project_id = mongodbatlas_project.project1.id + name = "MongoDB_Atlas" + cluster_type = "REPLICASET" + backup_enabled = true + replication_specs { - num_shards = 1 - regions_config { - region_name = var.region - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AWS" + region_name = var.region + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - # Provider Settings "block" - cloud_backup = true - auto_scaling_disk_gb_enabled = true - provider_name = "AWS" - disk_size_gb = 10 - provider_instance_size_name = "M10" } + output "atlasclusterstring" { - value = mongodbatlas_cluster.cluster.connection_strings + value = mongodbatlas_advanced_cluster.cluster.connection_strings } diff --git a/examples/mongodbatlas_database_user/main.tf b/examples/mongodbatlas_database_user/main.tf index c685f0c7c1..5ee8b13f6a 100644 --- a/examples/mongodbatlas_database_user/main.tf +++ b/examples/mongodbatlas_database_user/main.tf @@ -15,7 +15,7 @@ resource "mongodbatlas_database_user" "user1" { } scopes { - name = mongodbatlas_cluster.cluster.name + name = mongodbatlas_advanced_cluster.cluster.name type = "CLUSTER" } } diff --git a/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md b/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md index 01b3fc0988..f3c104f821 100644 --- 
a/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md +++ b/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/README.md @@ -70,15 +70,15 @@ terraform destroy 1. Import the cluster using the Project ID and cluster name (e.g. `5beae24579358e0ae95492af-MyCluster`): - $ terraform import mongodbatlas_cluster.my_cluster ProjectId-ClusterName + $ terraform import mongodbatlas_advanced_cluster.cluster ProjectId-ClusterName -2. Add any non-default values to the cluster resource *mongodbatlas_cluster.my_cluster* in *main.tf*. And add the following attribute: `encryption_at_rest_provider = "AWS"` +2. Add any non-default values to the cluster resource *mongodbatlas_advanced_cluster.cluster* in *main.tf*. And add the following attribute: `encryption_at_rest_provider = "AWS"` 3. Run terraform apply to enable encryption at rest for the cluster: `terraform apply` 4. (Optional) To remove the cluster from TF state, in case you want to disable project-level encryption and delete the role and key without deleting the imported cluster: - 1. First disable encryption on the cluster by changing the attribute `encryption_at_rest_provider = "NONE"` for the cluster resource *mongodbatlas_cluster.my_cluster* in *main.tf*. If you skip this and the next step, you won't be able to disable encryption on the project-level + 1. First disable encryption on the cluster by changing the attribute `encryption_at_rest_provider = "NONE"` for the cluster resource *mongodbatlas_advanced_cluster.cluster* in *main.tf*. If you skip this and the next step, you won't be able to disable encryption on the project-level 2. Run terraform apply to disable encryption for the cluster: `terraform apply` 3. Finally, remove the cluster from TF state: - terraform state rm mongodbatlas_cluster.my_cluster + terraform state rm mongodbatlas_advanced_cluster.cluster 4. 
You should now be able to run terraform destroy without deleting the cluster: `terraform destroy` diff --git a/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/main.tf b/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/main.tf index c4fb0b4e7b..fb4b6d9826 100644 --- a/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/main.tf +++ b/examples/mongodbatlas_encryption_at_rest/aws/atlas-cluster/main.tf @@ -23,13 +23,22 @@ resource "mongodbatlas_encryption_at_rest" "test" { } } -resource "mongodbatlas_cluster" "cluster" { +resource "mongodbatlas_advanced_cluster" "cluster" { project_id = var.atlas_project_id name = "MyCluster" cluster_type = "REPLICASET" - provider_name = "AWS" + backup_enabled = true encryption_at_rest_provider = "AWS" - backing_provider_name = "AWS" - provider_region_name = "US_EAST_1" - provider_instance_size_name = "M10" + + replication_specs { + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } } diff --git a/examples/mongodbatlas_federated_settings_identity_provider/azure/atlas.tf b/examples/mongodbatlas_federated_settings_identity_provider/azure/atlas.tf index 575ba671cd..42a890c75e 100644 --- a/examples/mongodbatlas_federated_settings_identity_provider/azure/atlas.tf +++ b/examples/mongodbatlas_federated_settings_identity_provider/azure/atlas.tf @@ -1,5 +1,5 @@ locals { - mongodb_uri = mongodbatlas_cluster.this.connection_strings[0].standard + mongodb_uri = mongodbatlas_advanced_cluster.this.connection_strings[0].standard } data "mongodbatlas_federated_settings" "this" { @@ -16,25 +16,22 @@ resource "mongodbatlas_project_ip_access_list" "mongo-access" { cidr_block = "0.0.0.0/0" } -resource "mongodbatlas_cluster" "this" { - project_id = mongodbatlas_project.this.id - name = var.project_name - mongo_db_major_version = "7.0" - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "this" { + project_id 
= mongodbatlas_project.this.id + name = var.project_name + cluster_type = "REPLICASET" + replication_specs { - num_shards = 1 - regions_config { - region_name = var.region - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AWS" + region_name = var.region + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - cloud_backup = false - auto_scaling_disk_gb_enabled = false - provider_name = "AWS" - disk_size_gb = 10 - provider_instance_size_name = "M10" } resource "mongodbatlas_federated_settings_identity_provider" "oidc" { diff --git a/examples/mongodbatlas_federated_settings_identity_provider/azure/outputs.tf b/examples/mongodbatlas_federated_settings_identity_provider/azure/outputs.tf index 57cdf85701..04d4a84209 100644 --- a/examples/mongodbatlas_federated_settings_identity_provider/azure/outputs.tf +++ b/examples/mongodbatlas_federated_settings_identity_provider/azure/outputs.tf @@ -9,7 +9,7 @@ output "ssh_connection_string" { } output "user_test_conn_string" { - value = "mongodb+srv://${local.test_user_username}:${local.test_user_password}@${replace(mongodbatlas_cluster.this.srv_address, "mongodb+srv://", "")}/?retryWrites=true" + value = "mongodb+srv://${local.test_user_username}:${local.test_user_password}@${replace(mongodbatlas_advanced_cluster.this.connection_strings[0].standard_srv, "mongodb+srv://", "")}/?retryWrites=true" sensitive = true description = "Useful for connecting to the database from Compass or other tool to validate data" } diff --git a/examples/mongodbatlas_network_peering/aws/main.tf b/examples/mongodbatlas_network_peering/aws/main.tf index 7c1b945b80..28da1d5cda 100644 --- a/examples/mongodbatlas_network_peering/aws/main.tf +++ b/examples/mongodbatlas_network_peering/aws/main.tf @@ -8,27 +8,23 @@ resource "mongodbatlas_project" "aws_atlas" { org_id = var.atlas_org_id } -resource "mongodbatlas_cluster" "cluster-atlas" { - project_id = mongodbatlas_project.aws_atlas.id 
- name = "cluster-atlas" - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "cluster-atlas" { + project_id = mongodbatlas_project.aws_atlas.id + name = "cluster-atlas" + cluster_type = "REPLICASET" + backup_enabled = true + replication_specs { - num_shards = 1 - regions_config { - region_name = var.atlas_region - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AWS" + region_name = var.atlas_region + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - cloud_backup = true - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - - # Provider Settings "block" - provider_name = "AWS" - disk_size_gb = 10 - provider_instance_size_name = "M10" } resource "mongodbatlas_database_user" "db-user" { @@ -46,7 +42,7 @@ resource "mongodbatlas_database_user" "db-user" { resource "mongodbatlas_network_peering" "aws-atlas" { accepter_region_name = var.aws_region project_id = mongodbatlas_project.aws_atlas.id - container_id = mongodbatlas_cluster.cluster-atlas.container_id + container_id = one(values(mongodbatlas_advanced_cluster.cluster-atlas.replication_specs[0].container_id)) provider_name = "AWS" route_table_cidr_block = aws_vpc.primary.cidr_block vpc_id = aws_vpc.primary.id diff --git a/examples/mongodbatlas_network_peering/azure/atlas.tf b/examples/mongodbatlas_network_peering/azure/atlas.tf index 4cfe740422..5485899bcf 100644 --- a/examples/mongodbatlas_network_peering/azure/atlas.tf +++ b/examples/mongodbatlas_network_peering/azure/atlas.tf @@ -3,34 +3,31 @@ provider "mongodbatlas" { public_key = var.public_key private_key = var.private_key } + # Create the mongodb atlas Azure cluster -resource "mongodbatlas_cluster" "azure-cluster" { - project_id = var.project_id - name = var.name - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "azure-cluster" { + project_id = var.project_id + name = var.name + cluster_type = "REPLICASET" + backup_enabled 
= true + replication_specs { - num_shards = 1 - regions_config { - region_name = var.provider_region_name - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = var.provider_region_name + electable_specs { + instance_size = var.provider_instance_size_name + node_count = 3 + } } } - backup_enabled = false - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - - # Provider settings block in this case it is Azure - provider_name = "AZURE" - provider_disk_type_name = var.provider_disk_type_name - provider_instance_size_name = var.provider_instance_size_name } # Create the peering connection request resource "mongodbatlas_network_peering" "test" { project_id = var.project_id - container_id = mongodbatlas_cluster.azure-cluster.container_id + container_id = one(values(mongodbatlas_advanced_cluster.azure-cluster.replication_specs[0].container_id)) provider_name = "AZURE" azure_directory_id = data.azurerm_client_config.current.tenant_id azure_subscription_id = data.azurerm_client_config.current.subscription_id diff --git a/examples/mongodbatlas_network_peering/azure/variables.tf b/examples/mongodbatlas_network_peering/azure/variables.tf index 998e4db4c2..cffaacb7e2 100644 --- a/examples/mongodbatlas_network_peering/azure/variables.tf +++ b/examples/mongodbatlas_network_peering/azure/variables.tf @@ -10,9 +10,6 @@ variable "project_id" { variable "provider_instance_size_name" { type = string } -variable "provider_disk_type_name" { - type = string -} variable "resource_group_name" { type = string } diff --git a/examples/mongodbatlas_network_peering/gcp/cluster.tf b/examples/mongodbatlas_network_peering/gcp/cluster.tf index a3683eebca..b8e1d9ebe1 100644 --- a/examples/mongodbatlas_network_peering/gcp/cluster.tf +++ b/examples/mongodbatlas_network_peering/gcp/cluster.tf @@ -1,45 +1,45 @@ # This cluster is in GCP cloud-provider with VPC peering enabled -resource 
"mongodbatlas_cluster" "cluster" { - project_id = var.project_id - name = "cluster-test" - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "cluster" { + project_id = var.project_id + name = "cluster-test" + cluster_type = "REPLICASET" + backup_enabled = true # enable cloud provider snapshots + replication_specs { - num_shards = 1 - regions_config { - region_name = var.atlas_region - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "GCP" + region_name = var.atlas_region + electable_specs { + instance_size = "M10" + node_count = 3 + } + auto_scaling { + compute_enabled = true + compute_scale_down_enabled = true + compute_min_instance_size = "M10" + compute_max_instance_size = "M20" + disk_gb_enabled = true + } } } - labels { + tags { key = "environment" value = "prod" } - cloud_backup = true - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - auto_scaling_compute_enabled = true - auto_scaling_compute_scale_down_enabled = true - - - # Provider Settings "block" - provider_name = "GCP" - provider_instance_size_name = "M10" - provider_auto_scaling_compute_max_instance_size = "M20" - provider_auto_scaling_compute_min_instance_size = "M10" - disk_size_gb = 40 advanced_configuration { minimum_enabled_tls_protocol = "TLS1_2" } + lifecycle { ignore_changes = [ - provider_instance_size_name + replication_specs[0].region_configs[0].electable_specs[0].instance_size, ] } } + # The connection strings available for the GCP MognoDB Atlas cluster output "connection_string" { - value = mongodbatlas_cluster.cluster.connection_strings + value = mongodbatlas_advanced_cluster.cluster.connection_strings } diff --git a/examples/mongodbatlas_online_archive/main.tf b/examples/mongodbatlas_online_archive/main.tf index bb3d21bad2..ebb9eb8cdc 100644 --- a/examples/mongodbatlas_online_archive/main.tf +++ b/examples/mongodbatlas_online_archive/main.tf @@ -31,15 +31,22 @@ resource 
"mongodbatlas_online_archive" "users_archive" { } } -# tflint-ignore: terraform_unused_declarations data "mongodbatlas_online_archive" "read_archive" { project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name archive_id = mongodbatlas_online_archive.users_archive.archive_id } -# tflint-ignore: terraform_unused_declarations data "mongodbatlas_online_archives" "all" { project_id = mongodbatlas_online_archive.users_archive.project_id cluster_name = mongodbatlas_online_archive.users_archive.cluster_name } + +output "online_archive_state" { + value = data.mongodbatlas_online_archive.read_archive.state +} + +output "online_archives_results" { + value = data.mongodbatlas_online_archives.all.results +} + diff --git a/examples/mongodbatlas_privatelink_endpoint/aws/cluster/README.md b/examples/mongodbatlas_privatelink_endpoint/aws/cluster/README.md index 4f2402e7c0..eb703bcc3e 100644 --- a/examples/mongodbatlas_privatelink_endpoint/aws/cluster/README.md +++ b/examples/mongodbatlas_privatelink_endpoint/aws/cluster/README.md @@ -83,7 +83,7 @@ $ terraform destroy 2. `mongodbatlas_privatelink_endpoint` is dependent on the `mongodbatlas_project` 3. `aws_vpc_endpoint` is dependent on the `mongodbatlas_privatelink_endpoint`, and its dependencies. 4. `mongodbatlas_privatelink_endpoint_service` is dependent on `aws_vpc_endpoint` and its dependencies. -5. `mongodbatlas_cluster` is dependent only on the `mongodbatlas_project`, howerver; its `connection_strings` are sourced from the `mongodbatlas_privatelink_endpoint_service`. `mongodbatlas_privatelink_endpoint_service` has explicitly been added to the `mongodbatlas_cluster` `depends_on` to ensure the private connection strings are correct following `terraform apply`. +5. `mongodbatlas_advanced_cluster` is dependent only on the `mongodbatlas_project`, howerver; its `connection_strings` are sourced from the `mongodbatlas_privatelink_endpoint_service`. 
`mongodbatlas_privatelink_endpoint_service` has explicitly been added to the `mongodbatlas_advanced_cluster` `depends_on` to ensure the private connection strings are correct following `terraform apply`. **Important Point** @@ -123,7 +123,7 @@ Cluster `connection_strings` is a list of maps matching the signature below. `aw In order to output the `private_endpoint.#.srv_connection_string` for the `aws_vpc_endpoint`, utilize locals such as the [following](output.tf): ``` locals { - private_endpoints = flatten([for cs in mongodbatlas_cluster.aws_private_connection.connection_strings : cs.private_endpoint]) + private_endpoints = flatten([for cs in mongodbatlas_advanced_cluster.aws_private_connection.connection_strings : cs.private_endpoint]) connection_strings = [ for pe in local.private_endpoints : pe.srv_connection_string diff --git a/examples/mongodbatlas_privatelink_endpoint/aws/cluster/atlas-cluster.tf b/examples/mongodbatlas_privatelink_endpoint/aws/cluster/atlas-cluster.tf index 4bf9bd3384..38e08232b1 100644 --- a/examples/mongodbatlas_privatelink_endpoint/aws/cluster/atlas-cluster.tf +++ b/examples/mongodbatlas_privatelink_endpoint/aws/cluster/atlas-cluster.tf @@ -1,23 +1,19 @@ -resource "mongodbatlas_cluster" "aws_private_connection" { - project_id = var.project_id - name = var.cluster_name - cloud_backup = true - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "aws_private_connection" { + project_id = var.project_id + name = var.cluster_name + cluster_type = "REPLICASET" + backup_enabled = true + replication_specs { - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - # Provider settings - provider_name = "AWS" - disk_size_gb = 10 - 
provider_instance_size_name = "M10" - depends_on = [mongodbatlas_privatelink_endpoint_service.pe_east_service] } diff --git a/examples/mongodbatlas_privatelink_endpoint/aws/cluster/output.tf b/examples/mongodbatlas_privatelink_endpoint/aws/cluster/output.tf index 45294b427f..28ce135c8d 100644 --- a/examples/mongodbatlas_privatelink_endpoint/aws/cluster/output.tf +++ b/examples/mongodbatlas_privatelink_endpoint/aws/cluster/output.tf @@ -1,5 +1,5 @@ locals { - private_endpoints = flatten([for cs in mongodbatlas_cluster.aws_private_connection.connection_strings : cs.private_endpoint]) + private_endpoints = flatten([for cs in mongodbatlas_advanced_cluster.aws_private_connection.connection_strings : cs.private_endpoint]) connection_strings = [ for pe in local.private_endpoints : pe.srv_connection_string diff --git a/examples/mongodbatlas_privatelink_endpoint/azure/main.tf b/examples/mongodbatlas_privatelink_endpoint/azure/main.tf index 6ccf44be28..f94d8046cd 100644 --- a/examples/mongodbatlas_privatelink_endpoint/azure/main.tf +++ b/examples/mongodbatlas_privatelink_endpoint/azure/main.tf @@ -19,12 +19,12 @@ resource "azurerm_virtual_network" "test" { } resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = var.resource_group_name - virtual_network_name = azurerm_virtual_network.test.name - address_prefixes = ["10.0.1.0/24"] - enforce_private_link_service_network_policies = true - enforce_private_link_endpoint_network_policies = true + name = "testsubnet" + resource_group_name = var.resource_group_name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] + private_link_service_network_policies_enabled = true + private_endpoint_network_policies_enabled = true } resource "mongodbatlas_privatelink_endpoint" "test" { diff --git a/examples/mongodbatlas_privatelink_endpoint_service_serverless/azure/main.tf b/examples/mongodbatlas_privatelink_endpoint_service_serverless/azure/main.tf index 
2c683de23d..d40e580bc5 100644 --- a/examples/mongodbatlas_privatelink_endpoint_service_serverless/azure/main.tf +++ b/examples/mongodbatlas_privatelink_endpoint_service_serverless/azure/main.tf @@ -19,12 +19,12 @@ resource "azurerm_virtual_network" "test" { } resource "azurerm_subnet" "test" { - name = "testsubnet" - resource_group_name = var.resource_group_name - virtual_network_name = azurerm_virtual_network.test.name - address_prefixes = ["10.0.1.0/24"] - enforce_private_link_service_network_policies = true - enforce_private_link_endpoint_network_policies = true + name = "testsubnet" + resource_group_name = var.resource_group_name + virtual_network_name = azurerm_virtual_network.test.name + address_prefixes = ["10.0.1.0/24"] + private_link_service_network_policies_enabled = true + private_endpoint_network_policies_enabled = true } resource "mongodbatlas_privatelink_endpoint_serverless" "test" { diff --git a/examples/mongodbatlas_search_deployment/versions.tf b/examples/mongodbatlas_search_deployment/versions.tf index 7cac4906f0..1888453805 100644 --- a/examples/mongodbatlas_search_deployment/versions.tf +++ b/examples/mongodbatlas_search_deployment/versions.tf @@ -2,8 +2,8 @@ terraform { required_providers { mongodbatlas = { source = "mongodb/mongodbatlas" - version = "~> 1.13" + version = "~> 1.0" } } required_version = ">= 1.0" -} \ No newline at end of file +} diff --git a/examples/mongodbatlas_third_party_integration/prometheus-and-teams/third-party-integration.tf b/examples/mongodbatlas_third_party_integration/prometheus-and-teams/third-party-integration.tf index 2c624722eb..8236cd0f5e 100644 --- a/examples/mongodbatlas_third_party_integration/prometheus-and-teams/third-party-integration.tf +++ b/examples/mongodbatlas_third_party_integration/prometheus-and-teams/third-party-integration.tf @@ -10,7 +10,6 @@ resource "mongodbatlas_third_party_integration" "test_prometheus" { user_name = var.user_name password = var.password service_discovery = "file" - scheme 
= "https" enabled = true } diff --git a/examples/starter/Readme.md b/examples/starter/Readme.md index b145aec076..e855faa776 100644 --- a/examples/starter/Readme.md +++ b/examples/starter/Readme.md @@ -75,7 +75,7 @@ Or to fetch the connection string using terraform follow the below steps: ```hcl output "atlasclusterstring" { - value = mongodbatlas_cluster.cluster.connection_strings + value = mongodbatlas_advanced_cluster.cluster.connection_strings } ``` **Outputs:** @@ -100,7 +100,7 @@ To fetch a particular connection string, use the **lookup()** function of terraf ``` output "plstring" { - value = lookup(mongodbatlas_cluster.cluster.connection_strings[0].aws_private_link_srv, aws_vpc_endpoint.ptfe_service.id) + value = lookup(mongodbatlas_advanced_cluster.cluster.connection_strings[0].aws_private_link_srv, aws_vpc_endpoint.ptfe_service.id) } ``` **Output:** diff --git a/examples/starter/atlas_cluster.tf b/examples/starter/atlas_cluster.tf index f07552a47c..18fff374e6 100644 --- a/examples/starter/atlas_cluster.tf +++ b/examples/starter/atlas_cluster.tf @@ -1,24 +1,23 @@ -resource "mongodbatlas_cluster" "cluster" { - project_id = mongodbatlas_project.project.id - name = var.cluster_name - mongo_db_major_version = var.mongodbversion - cluster_type = "REPLICASET" +resource "mongodbatlas_advanced_cluster" "cluster" { + project_id = mongodbatlas_project.project.id + name = var.cluster_name + cluster_type = "REPLICASET" + backup_enabled = true + replication_specs { - num_shards = 1 - regions_config { - region_name = var.region - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 + region_configs { + priority = 7 + provider_name = var.cloud_provider + region_name = var.region + electable_specs { + instance_size = "M10" + node_count = 3 + } } } - # Provider Settings "block" - cloud_backup = true - auto_scaling_disk_gb_enabled = true - provider_name = var.cloud_provider - provider_instance_size_name = "M10" } + output "connection_strings" { - value = 
mongodbatlas_cluster.cluster.connection_strings[0].standard_srv + value = mongodbatlas_advanced_cluster.cluster.connection_strings[0].standard_srv } diff --git a/examples/starter/variables.tf b/examples/starter/variables.tf index 4307ced129..2072e854be 100644 --- a/examples/starter/variables.tf +++ b/examples/starter/variables.tf @@ -26,10 +26,6 @@ variable "region" { type = string description = "MongoDB Atlas Cluster Region, must be a region for the provider given" } -variable "mongodbversion" { - type = string - description = "The Major MongoDB Version" -} variable "dbuser" { type = string description = "MongoDB Atlas Database User Name" diff --git a/scripts/tf-validate.sh b/scripts/tf-validate.sh index d97035651a..9d6c1ffa05 100755 --- a/scripts/tf-validate.sh +++ b/scripts/tf-validate.sh @@ -16,29 +16,30 @@ set -Eeou pipefail -arch_name=$(uname -m) +# Delete Terraform execution files so the script can be run multiple times +find ./examples -type d -name ".terraform" -exec rm -rf {} + +find ./examples -type f -name ".terraform.lock.hcl" -exec rm -f {} + + +export TF_CLI_CONFIG_FILE="$PWD/bin-examples/tf-validate.tfrc" + +# Use local provider to validate examples +go build -o bin-examples/terraform-provider-mongodbatlas . + +cat << EOF > "$TF_CLI_CONFIG_FILE" +provider_installation { + dev_overrides { + "mongodb/mongodbatlas" = "$PWD/bin-examples" + } + direct {} +} +EOF for DIR in $(find ./examples -type f -name '*.tf' -exec dirname {} \; | sort -u); do [ ! 
-d "$DIR" ] && continue - - - # Skip directories with "v08" or "v09" in their name for ARM64 - if [[ "$arch_name" == "arm64" ]] && echo "$DIR" | grep -qE "v08|v09"; then - echo "Skip directories with \"v08\" or \"v09\" in their name for ARM64" - echo "TF provider does not have a package available for ARM64 for version < 1.0" - echo "Skipping directory: $DIR" - continue - fi - pushd "$DIR" - - echo; echo -e "\e[1;35m===> Initializing Example: $DIR <===\e[0m"; echo - terraform init - - echo; echo -e "\e[1;35m===> Format Checking Example: $DIR <===\e[0m"; echo + echo; echo -e "\e[1;35m===> Example: $DIR <===\e[0m"; echo + terraform init > /dev/null # supress output as it's very verbose terraform fmt -check -recursive - - echo; echo -e "\e[1;35m===> Validating Example: $DIR <===\e[0m"; echo terraform validate popd done diff --git a/scripts/tflint.sh b/scripts/tflint.sh deleted file mode 100755 index 9f404abac0..0000000000 --- a/scripts/tflint.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2021 MongoDB Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -Eeou pipefail - -for DIR in $(find ./examples -type f -name '*.tf' -exec dirname {} \; | sort -u); do - [ ! 
-d "$DIR" ] && continue - - pushd "$DIR" - - echo; echo -e "\e[1;35m===> Validating Syntax Example: $DIR <===\e[0m"; echo - # Terraform syntax checks - tflint \ - --enable-rule=terraform_deprecated_interpolation \ - --enable-rule=terraform_deprecated_index \ - --enable-rule=terraform_unused_declarations \ - --enable-rule=terraform_comment_syntax \ - --enable-rule=terraform_required_version \ - --minimum-failure-severity=warning - popd -done From 81ff1ebd5c0ccbe4ce1acc61ac234fbb20861c88 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 18 Jul 2024 19:03:52 +0200 Subject: [PATCH 55/84] fix MongoDB_Atlas (#2445) --- examples/mongodbatlas_database_user/Readme.md | 4 ++-- examples/mongodbatlas_database_user/atlas_cluster.tf | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/mongodbatlas_database_user/Readme.md b/examples/mongodbatlas_database_user/Readme.md index 08f4211b75..0290514aec 100644 --- a/examples/mongodbatlas_database_user/Readme.md +++ b/examples/mongodbatlas_database_user/Readme.md @@ -77,8 +77,8 @@ atlasclusterstring = [ "aws_private_link_srv" = {} "private" = "" "private_srv" = "" - "standard" = "mongodb://MongoDB_Atlas-shard-00-00.xgpi2.mongodb.net:27017,MongoDB_Atlas-shard-00-01.xgpi2.mongodb.net:27017,MongoDB_Atlas-shard-00-02.xgpi2.mongodb.net:27017/?ssl=true&authSource=admin&replicaSet=atlas-90b49a-shard-0" - "standard_srv" = "mongodb+srv://MongoDB_Atlas.xgpi2.mongodb.net" + "standard" = "mongodb://MongoDBAtlas-shard-00-00.xgpi2.mongodb.net:27017,MongoDBAtlas-shard-00-01.xgpi2.mongodb.net:27017,MongoDBAtlas-shard-00-02.xgpi2.mongodb.net:27017/?ssl=true&authSource=admin&replicaSet=atlas-90b49a-shard-0" + "standard_srv" = "mongodb+srv://MongoDBAtlas.xgpi2.mongodb.net" }, ] project_name = Atlas-DB-Scope diff --git a/examples/mongodbatlas_database_user/atlas_cluster.tf b/examples/mongodbatlas_database_user/atlas_cluster.tf index 985cc4462c..0c19072a80 100644 --- 
a/examples/mongodbatlas_database_user/atlas_cluster.tf +++ b/examples/mongodbatlas_database_user/atlas_cluster.tf @@ -1,6 +1,6 @@ resource "mongodbatlas_advanced_cluster" "cluster" { project_id = mongodbatlas_project.project1.id - name = "MongoDB_Atlas" + name = "MongoDBAtlas" cluster_type = "REPLICASET" backup_enabled = true From 0163858ee6a8d9ff660ff2ab7eb247f32ce4b296 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Fri, 19 Jul 2024 07:22:35 +0000 Subject: [PATCH 56/84] chore: Updates examples link in index.md for v1.17.4 release --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 7bef25fda4..36ddadaf0c 100644 --- a/docs/index.md +++ b/docs/index.md @@ -219,7 +219,7 @@ We ship binaries but do not prioritize fixes for the following operating system ## Examples from MongoDB and the Community -We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.3/examples) +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.4/examples) in our GitHub repo that will help both beginner and more advanced users. Have a good example you've created and want to share? 
From b6a562a10e1a474c059df36f9c6fc4742bad1624 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Fri, 19 Jul 2024 07:22:57 +0000 Subject: [PATCH 57/84] chore: Updates CHANGELOG.md header for v1.17.4 release --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c1e23726ce..f76d6703ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ ## (Unreleased) +## 1.17.4 (July 19, 2024) + ENHANCEMENTS: * data-source/mongodbatlas_search_index: Adds attribute `stored_source` ([#2388](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2388)) From 27ce0b1bca1edac82d9097ca7d11c0f4918cd5dd Mon Sep 17 00:00:00 2001 From: Oriol Date: Fri, 19 Jul 2024 11:24:28 +0200 Subject: [PATCH 58/84] chore: Migrates `mongodbatlas_cloud_backup_snapshot_export_job` to new auto-generated SDK (#2436) * migrate to new auto-generated SDK * refactor and deprecate err_msg field * add changelog entry * docs * change deprecation version to 1.20 * reduce changelog explanation --- .changelog/2436.txt | 11 +++ .../cloud_backup_snapshot_export_job.md | 2 +- .../cloud_backup_snapshot_export_jobs.md | 2 +- .../cloud_backup_snapshot_export_job.md | 2 +- ...source_cloud_backup_snapshot_export_job.go | 5 +- ...ource_cloud_backup_snapshot_export_jobs.go | 56 +++++------ ...source_cloud_backup_snapshot_export_job.go | 94 ++++++++++--------- ...e_cloud_backup_snapshot_export_job_test.go | 5 +- 8 files changed, 96 insertions(+), 81 deletions(-) create mode 100644 .changelog/2436.txt diff --git a/.changelog/2436.txt b/.changelog/2436.txt new file mode 100644 index 0000000000..ee4fe558d3 --- /dev/null +++ b/.changelog/2436.txt @@ -0,0 +1,11 @@ +```release-note:note +resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. +``` + +```release-note:note +data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. 
+``` + +```release-note:note +data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: Deprecates the `err_msg` attribute. +``` diff --git a/docs/data-sources/cloud_backup_snapshot_export_job.md b/docs/data-sources/cloud_backup_snapshot_export_job.md index 6307ef5a10..b7af5446f7 100644 --- a/docs/data-sources/cloud_backup_snapshot_export_job.md +++ b/docs/data-sources/cloud_backup_snapshot_export_job.md @@ -49,7 +49,7 @@ In addition to all arguments above, the following attributes are exported: * `custom_data` - Custom data to include in the metadata file named `.complete` that Atlas uploads to the bucket when the export job finishes. Custom data can be specified as key and value pairs. * `components` - _Returned for sharded clusters only._ Export job details for each replica set in the sharded cluster. * `created_at` - Timestamp in ISO 8601 date and time format in UTC when the export job was created. -* `err_msg` - Error message, only if the export job failed. +* `err_msg` - Error message, only if the export job failed. **Note:** This attribute is deprecated as it is not being used. * `export_status` - _Returned for replica set only._ Status of the export job. * `finished_at` - Timestamp in ISO 8601 date and time format in UTC when the export job completes. * `export_job_id` - Unique identifier of the export job. diff --git a/docs/data-sources/cloud_backup_snapshot_export_jobs.md b/docs/data-sources/cloud_backup_snapshot_export_jobs.md index 5ffb6a7a07..c4fb5bad89 100644 --- a/docs/data-sources/cloud_backup_snapshot_export_jobs.md +++ b/docs/data-sources/cloud_backup_snapshot_export_jobs.md @@ -58,7 +58,7 @@ In addition to all arguments above, the following attributes are exported: * `custom_data` - Custom data to include in the metadata file named `.complete` that Atlas uploads to the bucket when the export job finishes. Custom data can be specified as key and value pairs. 
* `components` - _Returned for sharded clusters only._ Export job details for each replica set in the sharded cluster. * `created_at` - Timestamp in ISO 8601 date and time format in UTC when the export job was created. -* `err_msg` - Error message, only if the export job failed. +* `err_msg` - Error message, only if the export job failed. **Note:** This attribute is deprecated as it is not being used. * `export_status` - _Returned for replica set only._ Status of the export job. * `finished_at` - Timestamp in ISO 8601 date and time format in UTC when the export job completes. * `export_job_id` - Unique identifier of the export job. diff --git a/docs/resources/cloud_backup_snapshot_export_job.md b/docs/resources/cloud_backup_snapshot_export_job.md index 2fdc724104..2eb9c404df 100644 --- a/docs/resources/cloud_backup_snapshot_export_job.md +++ b/docs/resources/cloud_backup_snapshot_export_job.md @@ -101,7 +101,7 @@ In addition to all arguments above, the following attributes are exported: * `components` - _Returned for sharded clusters only._ Export job details for each replica set in the sharded cluster. * `created_at` - Timestamp in ISO 8601 date and time format in UTC when the export job was created. -* `err_msg` - Error message, only if the export job failed. +* `err_msg` - Error message, only if the export job failed. **Note:** This attribute is deprecated as it is not being used. * `export_status` - _Returned for replica set only._ Status of the export job. * `finished_at` - Timestamp in ISO 8601 date and time format in UTC when the export job completes. * `export_job_id` - Unique identifier of the export job. 
diff --git a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go index 54262d2a2a..66c6666965 100644 --- a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go +++ b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go @@ -71,8 +71,9 @@ func DataSource() *schema.Resource { Computed: true, }, "err_msg": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.20.0"), }, "export_bucket_id": { Type: schema.TypeString, diff --git a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go index 23b1fda897..216fa1a778 100644 --- a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go +++ b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go @@ -2,17 +2,20 @@ package cloudbackupsnapshotexportjob import ( "context" + "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - matlas "go.mongodb.org/atlas/mongodbatlas" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasCloudBackupSnapshotsExportJobsRead, + ReadContext: dataSourceRead, Schema: map[string]*schema.Schema{ "project_id": { Type: 
schema.TypeString, @@ -79,8 +82,9 @@ func PluralDataSource() *schema.Resource { Computed: true, }, "err_msg": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.20.0"), }, "export_bucket_id": { Type: schema.TypeString, @@ -117,28 +121,24 @@ func PluralDataSource() *schema.Resource { } } -func dataSourceMongoDBAtlasCloudBackupSnapshotsExportJobsRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. - conn := meta.(*config.MongoDBClient).Atlas +func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) + pageNum := d.Get("page_num").(int) + itemsPerPage := d.Get("items_per_page").(int) - options := &matlas.ListOptions{ - PageNum: d.Get("page_num").(int), - ItemsPerPage: d.Get("items_per_page").(int), - } - - jobs, _, err := conn.CloudProviderSnapshotExportJobs.List(ctx, projectID, clusterName, options) + jobs, _, err := connV2.CloudBackupsApi.ListBackupExportJobs(ctx, projectID, clusterName).PageNum(pageNum).ItemsPerPage(itemsPerPage).Execute() if err != nil { return diag.Errorf("error getting CloudProviderSnapshotExportJobs information: %s", err) } - if err := d.Set("results", flattenCloudBackupSnapshotExportJobs(jobs.Results)); err != nil { + if err := d.Set("results", flattenCloudBackupSnapshotExportJobs(jobs.GetResults())); err != nil { return diag.Errorf("error setting `results`: %s", err) } - if err := d.Set("total_count", jobs.TotalCount); err != nil { + if err := d.Set("total_count", jobs.GetTotalCount()); err != nil { return diag.Errorf("error setting `total_count`: %s", err) } @@ -147,7 +147,7 @@ func dataSourceMongoDBAtlasCloudBackupSnapshotsExportJobsRead(ctx context.Contex return nil } -func 
flattenCloudBackupSnapshotExportJobs(jobs []*matlas.CloudProviderSnapshotExportJob) []map[string]any { +func flattenCloudBackupSnapshotExportJobs(jobs []admin.DiskBackupExportJob) []map[string]any { var results []map[string]any if len(jobs) == 0 { @@ -158,18 +158,18 @@ func flattenCloudBackupSnapshotExportJobs(jobs []*matlas.CloudProviderSnapshotEx for k, job := range jobs { results[k] = map[string]any{ - "export_job_id": job.ID, - "created_at": job.CreatedAt, - "components": flattenExportJobsComponents(job.Components), - "custom_data": flattenExportJobsCustomData(job.CustomData), - "err_msg": job.ErrMsg, - "export_bucket_id": job.ExportBucketID, - "export_status_exported_collections": job.ExportStatus.ExportedCollections, - "export_status_total_collections": job.ExportStatus.TotalCollections, - "finished_at": job.FinishedAt, - "prefix": job.Prefix, - "snapshot_id": job.SnapshotID, - "state": job.State, + "export_job_id": job.GetId(), + "created_at": conversion.TimePtrToStringPtr(job.CreatedAt), + "components": flattenExportJobsComponents(job.GetComponents()), + "custom_data": flattenExportJobsCustomData(job.GetCustomData()), + "export_bucket_id": job.GetExportBucketId(), + "err_msg": "", + "export_status_exported_collections": job.ExportStatus.GetExportedCollections(), + "export_status_total_collections": job.ExportStatus.GetTotalCollections(), + "finished_at": conversion.TimePtrToStringPtr(job.FinishedAt), + "prefix": job.GetPrefix(), + "snapshot_id": job.GetSnapshotId(), + "state": job.GetState(), } } diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go index 8405ac9a1b..246d536df8 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go @@ -8,18 +8,19 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - matlas "go.mongodb.org/atlas/mongodbatlas" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func Resource() *schema.Resource { return &schema.Resource{ - CreateContext: resourceMongoDBAtlasCloudBackupSnapshotExportJobCreate, - ReadContext: resourceMongoDBAtlasCloudBackupSnapshotExportJobRead, + CreateContext: resourceCreate, + ReadContext: resourceRead, DeleteContext: resourceDelete, Importer: &schema.ResourceImporter{ - StateContext: resourceMongoDBAtlasCloudBackupSnapshotExportJobImportState, + StateContext: resourceImportState, }, Schema: returnCloudBackupSnapshotExportJobSchema(), } @@ -94,8 +95,9 @@ func returnCloudBackupSnapshotExportJobSchema() map[string]*schema.Schema { Computed: true, }, "err_msg": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.20.0"), }, "export_status_exported_collections": { Type: schema.TypeInt, @@ -120,7 +122,7 @@ func returnCloudBackupSnapshotExportJobSchema() map[string]*schema.Schema { } } -func resourceMongoDBAtlasCloudBackupSnapshotExportJobRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { exportJob, err := readExportJob(ctx, meta, d) if err != nil { reset := strings.Contains(err.Error(), "404") && !d.IsNewResource() @@ -135,8 +137,8 @@ func resourceMongoDBAtlasCloudBackupSnapshotExportJobRead(ctx context.Context, d return setExportJobFields(d, exportJob) } -func readExportJob(ctx context.Context, meta any, d *schema.ResourceData) 
(*matlas.CloudProviderSnapshotExportJob, error) { - conn := meta.(*config.MongoDBClient).Atlas +func readExportJob(ctx context.Context, meta any, d *schema.ResourceData) (*admin.DiskBackupExportJob, error) { + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID, clusterName, exportID := getRequiredFields(d) if d.Id() != "" && (projectID == "" || clusterName == "" || exportID == "") { ids := conversion.DecodeStateID(d.Id()) @@ -144,12 +146,12 @@ func readExportJob(ctx context.Context, meta any, d *schema.ResourceData) (*matl clusterName = ids["cluster_name"] exportID = ids["export_job_id"] } - exportJob, _, err := conn.CloudProviderSnapshotExportJobs.Get(ctx, projectID, clusterName, exportID) + exportJob, _, err := connV2.CloudBackupsApi.GetBackupExportJob(ctx, projectID, clusterName, exportID).Execute() if err == nil { d.SetId(conversion.EncodeStateID(map[string]string{ "project_id": projectID, "cluster_name": clusterName, - "export_job_id": exportJob.ID, + "export_job_id": exportJob.GetId(), })) } return exportJob, err @@ -162,61 +164,61 @@ func getRequiredFields(d *schema.ResourceData) (projectID, clusterName, exportID return projectID, clusterName, exportID } -func setExportJobFields(d *schema.ResourceData, exportJob *matlas.CloudProviderSnapshotExportJob) diag.Diagnostics { - if err := d.Set("export_job_id", exportJob.ID); err != nil { +func setExportJobFields(d *schema.ResourceData, exportJob *admin.DiskBackupExportJob) diag.Diagnostics { + if err := d.Set("export_job_id", exportJob.GetId()); err != nil { return diag.Errorf("error setting `export_job_id` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("snapshot_id", exportJob.SnapshotID); err != nil { + if err := d.Set("snapshot_id", exportJob.GetSnapshotId()); err != nil { return diag.Errorf("error setting `snapshot_id` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("custom_data", flattenExportJobsCustomData(exportJob.CustomData)); err != nil { + if err := 
d.Set("custom_data", flattenExportJobsCustomData(exportJob.GetCustomData())); err != nil { return diag.Errorf("error setting `custom_data` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("components", flattenExportJobsComponents(exportJob.Components)); err != nil { + if err := d.Set("components", flattenExportJobsComponents(exportJob.GetComponents())); err != nil { return diag.Errorf("error setting `components` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("created_at", exportJob.CreatedAt); err != nil { + if err := d.Set("created_at", conversion.TimePtrToStringPtr(exportJob.CreatedAt)); err != nil { return diag.Errorf("error setting `created_at` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("err_msg", exportJob.ErrMsg); err != nil { + if err := d.Set("err_msg", ""); err != nil { return diag.Errorf("error setting `created_at` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("export_bucket_id", exportJob.ExportBucketID); err != nil { + if err := d.Set("export_bucket_id", exportJob.GetExportBucketId()); err != nil { return diag.Errorf("error setting `created_at` for snapshot export job (%s): %s", d.Id(), err) } if exportJob.ExportStatus != nil { - if err := d.Set("export_status_exported_collections", exportJob.ExportStatus.ExportedCollections); err != nil { + if err := d.Set("export_status_exported_collections", exportJob.ExportStatus.GetExportedCollections()); err != nil { return diag.Errorf("error setting `export_status_exported_collections` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("export_status_total_collections", exportJob.ExportStatus.TotalCollections); err != nil { + if err := d.Set("export_status_total_collections", exportJob.ExportStatus.GetTotalCollections()); err != nil { return diag.Errorf("error setting `export_status_total_collections` for snapshot export job (%s): %s", d.Id(), err) } } - if err := d.Set("finished_at", exportJob.FinishedAt); err 
!= nil { + if err := d.Set("finished_at", conversion.TimePtrToStringPtr(exportJob.FinishedAt)); err != nil { return diag.Errorf("error setting `finished_at` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("prefix", exportJob.Prefix); err != nil { + if err := d.Set("prefix", exportJob.GetPrefix()); err != nil { return diag.Errorf("error setting `prefix` for snapshot export job (%s): %s", d.Id(), err) } - if err := d.Set("state", exportJob.State); err != nil { + if err := d.Set("state", exportJob.GetState()); err != nil { return diag.Errorf("error setting `prefix` for snapshot export job (%s): %s", d.Id(), err) } return nil } -func flattenExportJobsComponents(components []*matlas.CloudProviderSnapshotExportJobComponent) []map[string]any { +func flattenExportJobsComponents(components []admin.DiskBackupExportMember) []map[string]any { if len(components) == 0 { return nil } @@ -225,15 +227,15 @@ func flattenExportJobsComponents(components []*matlas.CloudProviderSnapshotExpor for i := range components { customData = append(customData, map[string]any{ - "export_id": components[i].ExportID, - "replica_set_name": components[i].ReplicaSetName, + "export_id": (components)[i].GetExportId(), + "replica_set_name": (components)[i].GetReplicaSetName(), }) } return customData } -func flattenExportJobsCustomData(data []*matlas.CloudProviderSnapshotExportJobCustomData) []map[string]any { +func flattenExportJobsCustomData(data []admin.BackupLabel) []map[string]any { if len(data) == 0 { return nil } @@ -242,52 +244,52 @@ func flattenExportJobsCustomData(data []*matlas.CloudProviderSnapshotExportJobCu for i := range data { customData = append(customData, map[string]any{ - "key": data[i].Key, - "value": data[i].Value, + "key": data[i].GetKey(), + "value": data[i].GetValue(), }) } return customData } -func resourceMongoDBAtlasCloudBackupSnapshotExportJobCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := 
meta.(*config.MongoDBClient).Atlas +func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) - request := &matlas.CloudProviderSnapshotExportJob{ - SnapshotID: d.Get("snapshot_id").(string), - ExportBucketID: d.Get("export_bucket_id").(string), + request := &admin.DiskBackupExportJobRequest{ + SnapshotId: d.Get("snapshot_id").(string), + ExportBucketId: d.Get("export_bucket_id").(string), CustomData: expandExportJobCustomData(d), } - jobResponse, _, err := conn.CloudProviderSnapshotExportJobs.Create(ctx, projectID, clusterName, request) + jobResponse, _, err := connV2.CloudBackupsApi.CreateBackupExportJob(ctx, projectID, clusterName, request).Execute() if err != nil { return diag.Errorf("error creating snapshot export job: %s", err) } - if err := d.Set("export_job_id", jobResponse.ID); err != nil { - return diag.Errorf("error setting `export_job_id` for snapshot export job (%s): %s", jobResponse.ID, err) + if err := d.Set("export_job_id", jobResponse.Id); err != nil { + return diag.Errorf("error setting `export_job_id` for snapshot export job (%s): %s", *jobResponse.Id, err) } - return resourceMongoDBAtlasCloudBackupSnapshotExportJobRead(ctx, d, meta) + return resourceRead(ctx, d, meta) } -func expandExportJobCustomData(d *schema.ResourceData) []*matlas.CloudProviderSnapshotExportJobCustomData { +func expandExportJobCustomData(d *schema.ResourceData) *[]admin.BackupLabel { customData := d.Get("custom_data").(*schema.Set) - res := make([]*matlas.CloudProviderSnapshotExportJobCustomData, customData.Len()) + res := make([]admin.BackupLabel, customData.Len()) for i, val := range customData.List() { v := val.(map[string]any) - res[i] = &matlas.CloudProviderSnapshotExportJobCustomData{ - Key: v["key"].(string), - Value: v["value"].(string), + res[i] = admin.BackupLabel{ + Key: 
conversion.Pointer(v["key"].(string)), + Value: conversion.Pointer(v["value"].(string)), } } - return res + return &res } -func resourceMongoDBAtlasCloudBackupSnapshotExportJobImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { +func resourceImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { conn := meta.(*config.MongoDBClient).Atlas parts := strings.SplitN(d.Id(), "--", 3) diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go index 7ebf7f5694..721b12c7c3 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go @@ -41,8 +41,9 @@ func basicTestCase(tb testing.TB) *resource.TestCase { "project_id": projectID, } attrsPluralDS = map[string]string{ - "project_id": projectID, - "results.0.custom_data.0.key": "exported by", + "project_id": projectID, + "results.0.custom_data.0.key": "exported by", + "results.0.custom_data.0.value": "tf-acc-test", } ) checks := []resource.TestCheckFunc{checkExists(resourceName)} From 3de3793a1cef90b3c5617d89f97bc69ae87265f3 Mon Sep 17 00:00:00 2001 From: Oriol Date: Fri, 19 Jul 2024 11:25:13 +0200 Subject: [PATCH 59/84] chore: Migrates `mongodbatlas_project_api_key` to new auto-generated SDK (#2437) * resource create * migrate update read and delete of resource * data sources migrated to new sdk * remove apiUserId from create and update in payload(is read only) * PR comments --- .../data_source_project_api_key.go | 21 +- .../data_source_project_api_keys.go | 32 ++- .../projectapikey/resource_project_api_key.go | 183 +++++++++--------- .../resource_project_api_key_test.go | 15 +- 4 files changed, 120 insertions(+), 131 deletions(-) diff --git 
a/internal/service/projectapikey/data_source_project_api_key.go b/internal/service/projectapikey/data_source_project_api_key.go index 29c8aa410e..eb335115a5 100644 --- a/internal/service/projectapikey/data_source_project_api_key.go +++ b/internal/service/projectapikey/data_source_project_api_key.go @@ -12,7 +12,7 @@ import ( func DataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasProjectAPIKeyRead, + ReadContext: dataSourceRead, Schema: map[string]*schema.Schema{ "project_id": { Type: schema.TypeString, @@ -57,35 +57,34 @@ func DataSource() *schema.Resource { } } -func dataSourceMongoDBAtlasProjectAPIKeyRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. - conn := meta.(*config.MongoDBClient).Atlas +func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) apiKeyID := d.Get("api_key_id").(string) - projectAPIKeys, _, err := conn.ProjectAPIKeys.List(ctx, projectID, nil) + projectAPIKeys, _, err := connV2.ProgrammaticAPIKeysApi.ListProjectApiKeys(ctx, projectID).Execute() if err != nil { return diag.FromErr(fmt.Errorf("error getting api key information: %s", err)) } - for _, val := range projectAPIKeys { - if val.ID != apiKeyID { + for _, val := range projectAPIKeys.GetResults() { + if val.GetId() != apiKeyID { continue } - if err := d.Set("description", val.Desc); err != nil { + if err := d.Set("description", val.GetDesc()); err != nil { return diag.FromErr(fmt.Errorf("error setting `description`: %s", err)) } - if err := d.Set("public_key", val.PublicKey); err != nil { + if err := d.Set("public_key", val.GetPublicKey()); err != nil { return diag.FromErr(fmt.Errorf("error setting `public_key`: %s", err)) } - if err := d.Set("private_key", val.PrivateKey); err != nil { + if err := d.Set("private_key", val.GetPrivateKey()); err != nil { return 
diag.FromErr(fmt.Errorf("error setting `private_key`: %s", err)) } - if projectAssignments, err := newProjectAssignment(ctx, conn, apiKeyID); err == nil { + if projectAssignments, err := newProjectAssignment(ctx, connV2, apiKeyID); err == nil { if err := d.Set("project_assignment", projectAssignments); err != nil { return diag.Errorf(ErrorProjectSetting, `project_assignment`, projectID, err) } diff --git a/internal/service/projectapikey/data_source_project_api_keys.go b/internal/service/projectapikey/data_source_project_api_keys.go index 175839363c..117a11c436 100644 --- a/internal/service/projectapikey/data_source_project_api_keys.go +++ b/internal/service/projectapikey/data_source_project_api_keys.go @@ -8,13 +8,12 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - - matlas "go.mongodb.org/atlas/mongodbatlas" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func PluralDataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasProjectAPIKeysRead, + ReadContext: pluralDataSourceRead, Schema: map[string]*schema.Schema{ "project_id": { Type: schema.TypeString, @@ -75,22 +74,19 @@ func PluralDataSource() *schema.Resource { } } -func dataSourceMongoDBAtlasProjectAPIKeysRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. 
- conn := meta.(*config.MongoDBClient).Atlas - options := &matlas.ListOptions{ - PageNum: d.Get("page_num").(int), - ItemsPerPage: d.Get("items_per_page").(int), - } +func pluralDataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 + pageNum := d.Get("page_num").(int) + itemsPerPage := d.Get("items_per_page").(int) projectID := d.Get("project_id").(string) - apiKeys, _, err := conn.ProjectAPIKeys.List(ctx, projectID, options) + apiKeys, _, err := connV2.ProgrammaticAPIKeysApi.ListProjectApiKeys(ctx, projectID).PageNum(pageNum).ItemsPerPage(itemsPerPage).Execute() if err != nil { return diag.FromErr(fmt.Errorf("error getting api keys information: %s", err)) } - results, err := flattenProjectAPIKeys(ctx, conn, projectID, apiKeys) + results, err := flattenProjectAPIKeys(ctx, connV2, apiKeys.GetResults()) if err != nil { diag.FromErr(fmt.Errorf("error setting `results`: %s", err)) } @@ -104,7 +100,7 @@ func dataSourceMongoDBAtlasProjectAPIKeysRead(ctx context.Context, d *schema.Res return nil } -func flattenProjectAPIKeys(ctx context.Context, conn *matlas.Client, projectID string, apiKeys []matlas.APIKey) ([]map[string]any, error) { +func flattenProjectAPIKeys(ctx context.Context, connV2 *admin.APIClient, apiKeys []admin.ApiKeyUserDetails) ([]map[string]any, error) { var results []map[string]any if len(apiKeys) == 0 { @@ -114,13 +110,13 @@ func flattenProjectAPIKeys(ctx context.Context, conn *matlas.Client, projectID s results = make([]map[string]any, len(apiKeys)) for k, apiKey := range apiKeys { results[k] = map[string]any{ - "api_key_id": apiKey.ID, - "description": apiKey.Desc, - "public_key": apiKey.PublicKey, - "private_key": apiKey.PrivateKey, + "api_key_id": apiKey.GetId(), + "description": apiKey.GetDesc(), + "public_key": apiKey.GetPublicKey(), + "private_key": apiKey.GetPrivateKey(), } - projectAssignment, err := newProjectAssignment(ctx, conn, apiKey.ID) + projectAssignment, 
err := newProjectAssignment(ctx, connV2, apiKey.GetId()) if err != nil { return nil, err } diff --git a/internal/service/projectapikey/resource_project_api_key.go b/internal/service/projectapikey/resource_project_api_key.go index 518733b8f4..439f4dfaa4 100644 --- a/internal/service/projectapikey/resource_project_api_key.go +++ b/internal/service/projectapikey/resource_project_api_key.go @@ -12,7 +12,6 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "go.mongodb.org/atlas-sdk/v20240530002/admin" - matlas "go.mongodb.org/atlas/mongodbatlas" ) const ( @@ -21,12 +20,12 @@ const ( func Resource() *schema.Resource { return &schema.Resource{ - CreateContext: resourceMongoDBAtlasProjectAPIKeyCreate, - ReadContext: resourceMongoDBAtlasProjectAPIKeyRead, - UpdateContext: resourceMongoDBAtlasProjectAPIKeyUpdate, - DeleteContext: resourceMongoDBAtlasProjectAPIKeyDelete, + CreateContext: resourceCreate, + ReadContext: resourceRead, + UpdateContext: resourceUpdate, + DeleteContext: resourceDelete, Importer: &schema.ResourceImporter{ - StateContext: resourceMongoDBAtlasProjectAPIKeyImportState, + StateContext: resourceImportState, }, Schema: map[string]*schema.Schema{ "api_key_id": { @@ -77,37 +76,36 @@ type APIProjectAssignmentKeyInput struct { const errorNoProjectAssignmentDefined = "could not obtain a project id as no assignments are defined" -func resourceMongoDBAtlasProjectAPIKeyCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := meta.(*config.MongoDBClient).Atlas +func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 - var apiKey *matlas.APIKey + var apiKey *admin.ApiKeyUserDetails + var resp *http.Response var err error - var resp *matlas.Response - createRequest := new(matlas.APIKeyInput) - createRequest.Desc = 
d.Get("description").(string) + createRequest := &admin.CreateAtlasProjectApiKey{ + Desc: d.Get("description").(string), + } + if projectAssignments, ok := d.GetOk("project_assignment"); ok { projectAssignmentList := ExpandProjectAssignmentSet(projectAssignments.(*schema.Set)) // creates api key using project id of first defined project assignment firstAssignment := projectAssignmentList[0] createRequest.Roles = firstAssignment.RoleNames - apiKey, resp, err = conn.ProjectAPIKeys.Create(ctx, firstAssignment.ProjectID, createRequest) + apiKey, resp, err = connV2.ProgrammaticAPIKeysApi.CreateProjectApiKey(ctx, firstAssignment.ProjectID, createRequest).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { d.SetId("") return nil } - return diag.FromErr(err) } // assign created api key to remaining project assignments for _, apiKeyList := range projectAssignmentList[1:] { - createRequest.Roles = apiKeyList.RoleNames - _, err := conn.ProjectAPIKeys.Assign(ctx, apiKeyList.ProjectID, apiKey.ID, &matlas.AssignAPIKey{ - Roles: createRequest.Roles, - }) + assignment := []admin.UserAccessRoleAssignment{{Roles: &apiKeyList.RoleNames}} + _, _, err := connV2.ProgrammaticAPIKeysApi.AddProjectApiKey(ctx, apiKeyList.ProjectID, apiKey.GetId(), &assignment).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -117,24 +115,23 @@ func resourceMongoDBAtlasProjectAPIKeyCreate(ctx context.Context, d *schema.Reso } } - if err := d.Set("public_key", apiKey.PublicKey); err != nil { + if err := d.Set("public_key", apiKey.GetPublicKey()); err != nil { return diag.FromErr(fmt.Errorf("error setting `public_key`: %s", err)) } - if err := d.Set("private_key", apiKey.PrivateKey); err != nil { + if err := d.Set("private_key", apiKey.GetPrivateKey()); err != nil { return diag.FromErr(fmt.Errorf("error setting `private_key`: %s", err)) } d.SetId(conversion.EncodeStateID(map[string]string{ - "api_key_id": apiKey.ID, + 
"api_key_id": apiKey.GetId(), })) - return resourceMongoDBAtlasProjectAPIKeyRead(ctx, d, meta) + return resourceRead(ctx, d, meta) } -func resourceMongoDBAtlasProjectAPIKeyRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. - conn := meta.(*config.MongoDBClient).Atlas +func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) apiKeyID := ids["api_key_id"] @@ -143,30 +140,30 @@ func resourceMongoDBAtlasProjectAPIKeyRead(ctx context.Context, d *schema.Resour return diag.FromErr(fmt.Errorf("could not obtain a project id from state: %s", err)) } - projectAPIKeys, _, err := conn.ProjectAPIKeys.List(ctx, *firstProjectID, nil) + projectAPIKeys, _, err := connV2.ProgrammaticAPIKeysApi.ListProjectApiKeys(ctx, *firstProjectID).Execute() if err != nil { return diag.FromErr(fmt.Errorf("error getting api key information: %s", err)) } apiKeyIsPresent := false - for _, val := range projectAPIKeys { - if val.ID != apiKeyID { + for _, val := range projectAPIKeys.GetResults() { + if val.GetId() != apiKeyID { continue } apiKeyIsPresent = true - if err := d.Set("api_key_id", val.ID); err != nil { + if err := d.Set("api_key_id", val.GetId()); err != nil { return diag.FromErr(fmt.Errorf("error setting `api_key_id`: %s", err)) } - if err := d.Set("description", val.Desc); err != nil { + if err := d.Set("description", val.GetDesc()); err != nil { return diag.FromErr(fmt.Errorf("error setting `description`: %s", err)) } - if err := d.Set("public_key", val.PublicKey); err != nil { + if err := d.Set("public_key", val.GetPublicKey()); err != nil { return diag.FromErr(fmt.Errorf("error setting `public_key`: %s", err)) } - if projectAssignments, err := newProjectAssignment(ctx, conn, apiKeyID); err == nil { + if projectAssignments, err := newProjectAssignment(ctx, connV2, apiKeyID); err == nil { if err := 
d.Set("project_assignment", projectAssignments); err != nil { return diag.Errorf("error setting `project_assignment` : %s", err) } @@ -181,8 +178,7 @@ func resourceMongoDBAtlasProjectAPIKeyRead(ctx context.Context, d *schema.Resour return nil } -func resourceMongoDBAtlasProjectAPIKeyUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := meta.(*config.MongoDBClient).Atlas +func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) @@ -197,9 +193,8 @@ func resourceMongoDBAtlasProjectAPIKeyUpdate(ctx context.Context, d *schema.Reso for _, apiKey := range newAssignments { projectID := apiKey.(map[string]any)["project_id"].(string) roles := conversion.ExpandStringList(apiKey.(map[string]any)["role_names"].(*schema.Set).List()) - _, err := conn.ProjectAPIKeys.Assign(ctx, projectID, apiKeyID, &matlas.AssignAPIKey{ - Roles: roles, - }) + assignment := []admin.UserAccessRoleAssignment{{Roles: &roles}} + _, _, err := connV2.ProgrammaticAPIKeysApi.AddProjectApiKey(ctx, projectID, apiKeyID, &assignment).Execute() if err != nil { return diag.Errorf("error assigning api_keys into the project(%s): %s", projectID, err) } @@ -209,7 +204,7 @@ func resourceMongoDBAtlasProjectAPIKeyUpdate(ctx context.Context, d *schema.Reso // Removing projects assignments for _, apiKey := range removedAssignments { projectID := apiKey.(map[string]any)["project_id"].(string) - _, err := conn.ProjectAPIKeys.Unassign(ctx, projectID, apiKeyID) + _, _, err := connV2.ProgrammaticAPIKeysApi.RemoveProjectApiKey(ctx, projectID, apiKeyID).Execute() if err != nil && strings.Contains(err.Error(), "GROUP_NOT_FOUND") { continue // allows removing assignment for a project that has been deleted } @@ -222,9 +217,8 @@ func resourceMongoDBAtlasProjectAPIKeyUpdate(ctx context.Context, d *schema.Reso for _, apiKey := range changedAssignments { projectID := 
apiKey.(map[string]any)["project_id"].(string) roles := conversion.ExpandStringList(apiKey.(map[string]any)["role_names"].(*schema.Set).List()) - _, err := conn.ProjectAPIKeys.Assign(ctx, projectID, apiKeyID, &matlas.AssignAPIKey{ - Roles: roles, - }) + assignment := []admin.UserAccessRoleAssignment{{Roles: &roles}} + _, _, err := connV2.ProgrammaticAPIKeysApi.AddProjectApiKey(ctx, projectID, apiKeyID, &assignment).Execute() if err != nil { return diag.Errorf("error updating role names for the api_key(%s): %s", apiKey, err) } @@ -245,11 +239,11 @@ func resourceMongoDBAtlasProjectAPIKeyUpdate(ctx context.Context, d *schema.Reso } } - return resourceMongoDBAtlasProjectAPIKeyRead(ctx, d, meta) + return resourceRead(ctx, d, meta) } -func resourceMongoDBAtlasProjectAPIKeyDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := meta.(*config.MongoDBClient).Atlas +func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) apiKeyID := ids["api_key_id"] var orgID string @@ -259,42 +253,40 @@ func resourceMongoDBAtlasProjectAPIKeyDelete(ctx context.Context, d *schema.Reso return diag.FromErr(fmt.Errorf("could not obtain a project id from state: %s", err)) } - projectAPIKeys, _, err := conn.ProjectAPIKeys.List(ctx, *firstProjectID, nil) + projectAPIKeys, _, err := connV2.ProgrammaticAPIKeysApi.ListProjectApiKeys(ctx, *firstProjectID).Execute() if err != nil { return diag.FromErr(fmt.Errorf("error getting api key information: %s", err)) } - for _, val := range projectAPIKeys { - if val.ID == apiKeyID { - for i, role := range val.Roles { - if strings.HasPrefix(role.RoleName, "ORG_") { - orgID = val.Roles[i].OrgID + for _, val := range projectAPIKeys.GetResults() { + if val.GetId() == apiKeyID { + for i, role := range val.GetRoles() { + if strings.HasPrefix(role.GetRoleName(), "ORG_") { + orgID = 
val.GetRoles()[i].GetOrgId() } } } } - options := &matlas.ListOptions{} - - apiKeyOrgList, _, err := conn.Root.List(ctx, options) + apiKeyOrgList, _, err := connV2.RootApi.GetSystemStatus(ctx).Execute() if err != nil { return diag.FromErr(fmt.Errorf("error getting api key information: %s", err)) } - projectAssignments, err := getAPIProjectAssignments(ctx, conn, apiKeyOrgList, apiKeyID) + projectAssignments, err := getAPIProjectAssignments(ctx, connV2, apiKeyOrgList, apiKeyID) if err != nil { return diag.FromErr(fmt.Errorf("error getting api key information: %s", err)) } for _, apiKey := range projectAssignments { - _, err = conn.ProjectAPIKeys.Unassign(ctx, apiKey.ProjectID, apiKeyID) + _, _, err = connV2.ProgrammaticAPIKeysApi.RemoveProjectApiKey(ctx, apiKey.ProjectID, apiKeyID).Execute() if err != nil { return diag.FromErr(fmt.Errorf("error deleting project api key: %s", err)) } } if orgID != "" { - if _, err = conn.APIKeys.Delete(ctx, orgID, apiKeyID); err != nil { + if _, _, err = connV2.ProgrammaticAPIKeysApi.DeleteApiKey(ctx, orgID, apiKeyID).Execute(); err != nil { return diag.FromErr(fmt.Errorf("error unable to delete Key (%s): %s", apiKeyID, err)) } } @@ -303,8 +295,8 @@ func resourceMongoDBAtlasProjectAPIKeyDelete(ctx context.Context, d *schema.Reso return nil } -func resourceMongoDBAtlasProjectAPIKeyImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - conn := meta.(*config.MongoDBClient).Atlas +func resourceImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + connV2 := meta.(*config.MongoDBClient).AtlasV2 parts := strings.SplitN(d.Id(), "-", 2) if len(parts) != 2 { @@ -314,28 +306,28 @@ func resourceMongoDBAtlasProjectAPIKeyImportState(ctx context.Context, d *schema projectID := parts[0] apiKeyID := parts[1] - projectAPIKeys, _, err := conn.ProjectAPIKeys.List(ctx, projectID, nil) + projectAPIKeys, _, err := 
connV2.ProgrammaticAPIKeysApi.ListProjectApiKeys(ctx, projectID).Execute() if err != nil { return nil, fmt.Errorf("couldn't import api key %s in project %s, error: %s", projectID, apiKeyID, err) } - for _, val := range projectAPIKeys { - if val.ID == apiKeyID { - if err := d.Set("description", val.Desc); err != nil { + for _, val := range projectAPIKeys.GetResults() { + if val.GetId() == apiKeyID { + if err := d.Set("description", val.GetDesc()); err != nil { return nil, fmt.Errorf("error setting `description`: %s", err) } - if err := d.Set("public_key", val.PublicKey); err != nil { + if err := d.Set("public_key", val.GetPublicKey()); err != nil { return nil, fmt.Errorf("error setting `public_key`: %s", err) } - if projectAssignments, err := newProjectAssignment(ctx, conn, apiKeyID); err == nil { + if projectAssignments, err := newProjectAssignment(ctx, connV2, apiKeyID); err == nil { if err := d.Set("project_assignment", projectAssignments); err != nil { return nil, fmt.Errorf("error setting `project_assignment`: %s", err) } } d.SetId(conversion.EncodeStateID(map[string]string{ - "api_key_id": val.ID, + "api_key_id": val.GetId(), })) } } @@ -353,7 +345,7 @@ func getFirstProjectIDFromAssignments(d *schema.ResourceData) (*string, error) { return nil, errors.New(errorNoProjectAssignmentDefined) } -func flattenProjectAPIKeyRoles(projectID string, apiKeyRoles []matlas.AtlasRole) []string { +func flattenProjectAPIKeyRoles(projectID string, apiKeyRoles []admin.CloudAccessRoleAssignment) []string { if len(apiKeyRoles) == 0 { return nil } @@ -361,8 +353,8 @@ func flattenProjectAPIKeyRoles(projectID string, apiKeyRoles []matlas.AtlasRole) flattenedOrgRoles := []string{} for _, role := range apiKeyRoles { - if strings.HasPrefix(role.RoleName, "GROUP_") && role.GroupID == projectID { - flattenedOrgRoles = append(flattenedOrgRoles, role.RoleName) + if strings.HasPrefix(role.GetRoleName(), "GROUP_") && role.GetGroupId() == projectID { + flattenedOrgRoles = 
append(flattenedOrgRoles, role.GetRoleName()) } } @@ -383,26 +375,28 @@ func ExpandProjectAssignmentSet(projectAssignments *schema.Set) []*APIProjectAss return res } -func newProjectAssignment(ctx context.Context, conn *matlas.Client, apiKeyID string) ([]map[string]any, error) { - apiKeyOrgList, _, err := conn.Root.List(ctx, nil) +func newProjectAssignment(ctx context.Context, connV2 *admin.APIClient, apiKeyID string) ([]map[string]any, error) { + apiKeyOrgList, _, err := connV2.RootApi.GetSystemStatus(ctx).Execute() if err != nil { return nil, fmt.Errorf("error getting api key information: %s", err) } - projectAssignments, err := getAPIProjectAssignments(ctx, conn, apiKeyOrgList, apiKeyID) + projectAssignments, err := getAPIProjectAssignments(ctx, connV2, apiKeyOrgList, apiKeyID) if err != nil { return nil, fmt.Errorf("error getting api key information: %s", err) } var results []map[string]any - var atlasRoles []matlas.AtlasRole - var atlasRole matlas.AtlasRole + var atlasRoles []admin.CloudAccessRoleAssignment if len(projectAssignments) > 0 { results = make([]map[string]any, len(projectAssignments)) for k, apiKey := range projectAssignments { for _, roleName := range apiKey.RoleNames { - atlasRole.GroupID = apiKey.ProjectID - atlasRole.RoleName = roleName + atlasRole := admin.CloudAccessRoleAssignment{ + GroupId: &apiKey.ProjectID, + RoleName: &roleName, + } + atlasRoles = append(atlasRoles, atlasRole) } results[k] = map[string]any{ @@ -442,31 +436,32 @@ func getStateProjectAssignmentAPIKeys(d *schema.ResourceData) (newAssignments, c return } -func getAPIProjectAssignments(ctx context.Context, conn *matlas.Client, apiKeyOrgList *matlas.Root, apiKeyID string) ([]APIProjectAssignmentKeyInput, error) { +func getAPIProjectAssignments(ctx context.Context, connV2 *admin.APIClient, apiKeyOrgList *admin.SystemStatus, apiKeyID string) ([]APIProjectAssignmentKeyInput, error) { projectAssignments := []APIProjectAssignmentKeyInput{} - for idx, role := range 
apiKeyOrgList.APIKey.Roles { - if strings.HasPrefix(role.RoleName, "ORG_") { - orgKeys, _, err := conn.APIKeys.List(ctx, apiKeyOrgList.APIKey.Roles[idx].OrgID, nil) - if err != nil { - return nil, fmt.Errorf("error getting api key information: %s", err) - } - for _, val := range orgKeys { - if val.ID == apiKeyID { - for _, r := range val.Roles { - temp := new(APIProjectAssignmentKeyInput) - if strings.HasPrefix(r.RoleName, "GROUP_") { - temp.ProjectID = r.GroupID - for _, l := range val.Roles { - if l.GroupID == temp.ProjectID { - temp.RoleNames = append(temp.RoleNames, l.RoleName) - } + for idx, role := range apiKeyOrgList.ApiKey.GetRoles() { + if !strings.HasPrefix(*role.RoleName, "ORG_") { + continue + } + roles := apiKeyOrgList.ApiKey.GetRoles() + orgKeys, _, err := connV2.ProgrammaticAPIKeysApi.ListApiKeys(ctx, *roles[idx].OrgId).Execute() + if err != nil { + return nil, fmt.Errorf("error getting api key information: %s", err) + } + for _, val := range orgKeys.GetResults() { + if val.GetId() == apiKeyID { + for _, r := range val.GetRoles() { + temp := new(APIProjectAssignmentKeyInput) + if strings.HasPrefix(r.GetRoleName(), "GROUP_") { + temp.ProjectID = r.GetGroupId() + for _, l := range val.GetRoles() { + if l.GetGroupId() == temp.ProjectID { + temp.RoleNames = append(temp.RoleNames, l.GetRoleName()) } - projectAssignments = append(projectAssignments, *temp) } + projectAssignments = append(projectAssignments, *temp) } } } - break } } return projectAssignments, nil diff --git a/internal/service/projectapikey/resource_project_api_key_test.go b/internal/service/projectapikey/resource_project_api_key_test.go index 8654cfcce7..481b3c89c8 100644 --- a/internal/service/projectapikey/resource_project_api_key_test.go +++ b/internal/service/projectapikey/resource_project_api_key_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - matlas "go.mongodb.org/atlas/mongodbatlas" ) const ( @@ -236,13 +235,13 @@ func TestAccProjectAPIKey_invalidRole(t *testing.T) { } func deleteAPIKeyManually(orgID, descriptionPrefix string) error { - list, _, err := acc.Conn().APIKeys.List(context.Background(), orgID, &matlas.ListOptions{}) + list, _, err := acc.ConnV2().ProgrammaticAPIKeysApi.ListApiKeys(context.Background(), orgID).Execute() if err != nil { return err } - for _, key := range list { - if strings.HasPrefix(key.Desc, descriptionPrefix) { - if _, err := acc.Conn().APIKeys.Delete(context.Background(), orgID, key.ID); err != nil { + for _, key := range list.GetResults() { + if strings.HasPrefix(key.GetDesc(), descriptionPrefix) { + if _, _, err := acc.ConnV2().ProgrammaticAPIKeysApi.DeleteApiKey(context.Background(), orgID, key.GetId()).Execute(); err != nil { return err } } @@ -256,13 +255,13 @@ func checkDestroy(projectID string) resource.TestCheckFunc { if rs.Type != "mongodbatlas_project_api_key" { continue } - projectAPIKeys, _, err := acc.Conn().ProjectAPIKeys.List(context.Background(), projectID, nil) + projectAPIKeys, _, err := acc.ConnV2().ProgrammaticAPIKeysApi.ListProjectApiKeys(context.Background(), projectID).Execute() if err != nil { return nil } ids := conversion.DecodeStateID(rs.Primary.ID) - for _, val := range projectAPIKeys { - if val.ID == ids["api_key_id"] { + for _, val := range projectAPIKeys.GetResults() { + if val.GetId() == ids["api_key_id"] { return fmt.Errorf("Project API Key (%s) still exists", ids["role_name"]) } } From 5578f1625926982854f05f872767a3b146522f76 Mon Sep 17 00:00:00 2001 From: Oriol Date: Fri, 19 Jul 2024 11:25:33 +0200 Subject: [PATCH 60/84] chore: Removes usage of old Admin SDK in tests (#2442) * remove matlas from alert_configuration test * remove matlas from custom_db_role test --- .../data_source_alert_configurations_test.go | 11 +- .../resource_custom_db_role_test.go | 236 
+++++++++--------- 2 files changed, 121 insertions(+), 126 deletions(-) diff --git a/internal/service/alertconfiguration/data_source_alert_configurations_test.go b/internal/service/alertconfiguration/data_source_alert_configurations_test.go index 61bb45de90..e13bab4246 100644 --- a/internal/service/alertconfiguration/data_source_alert_configurations_test.go +++ b/internal/service/alertconfiguration/data_source_alert_configurations_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - matlas "go.mongodb.org/atlas/mongodbatlas" ) func TestAccConfigDSAlertConfigurations_basic(t *testing.T) { @@ -141,11 +140,7 @@ func checkCount(resourceName string) resource.TestCheckFunc { ids := conversion.DecodeStateID(rs.Primary.ID) projectID := ids["project_id"] - alertResp, _, err := acc.Conn().AlertConfigurations.List(context.Background(), projectID, &matlas.ListOptions{ - PageNum: 0, - ItemsPerPage: 100, - IncludeCount: true, - }) + alertResp, _, err := acc.ConnV2().AlertConfigurationsApi.ListAlertConfigurations(context.Background(), projectID).Execute() if err != nil { return fmt.Errorf("the Alert Configurations List for project (%s) could not be read", projectID) @@ -157,8 +152,8 @@ func checkCount(resourceName string) resource.TestCheckFunc { return fmt.Errorf("%s results count is somehow not a number %s", resourceName, resultsCountAttr) } - if resultsCount != len(alertResp) { - return fmt.Errorf("%s results count (%d) did not match that of current Alert Configurations (%d)", resourceName, resultsCount, len(alertResp)) + if resultsCount != len(alertResp.GetResults()) { + return fmt.Errorf("%s results count (%d) did not match that of current Alert Configurations (%d)", resourceName, resultsCount, len(alertResp.GetResults())) } if totalCountAttr := 
rs.Primary.Attributes["total_count"]; totalCountAttr != "" { diff --git a/internal/service/customdbrole/resource_custom_db_role_test.go b/internal/service/customdbrole/resource_custom_db_role_test.go index af2e6282b6..1dd4663c54 100644 --- a/internal/service/customdbrole/resource_custom_db_role_test.go +++ b/internal/service/customdbrole/resource_custom_db_role_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/spf13/cast" - matlas "go.mongodb.org/atlas/mongodbatlas" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) const resourceName = "mongodbatlas_custom_db_role.test" @@ -72,64 +72,64 @@ func TestAccConfigRSCustomDBRoles_WithInheritedRoles(t *testing.T) { projectName = acc.RandomProjectName() ) - inheritRole := []matlas.CustomDBRole{ + inheritRole := []admin.UserCustomDBRole{ { RoleName: acc.RandomName(), - Actions: []matlas.Action{{ + Actions: &[]admin.DatabasePrivilegeAction{{ Action: "INSERT", - Resources: []matlas.Resource{{ - DB: conversion.Pointer(acc.RandomClusterName()), + Resources: &[]admin.DatabasePermittedNamespaceResource{{ + Db: acc.RandomClusterName(), }}, }}, }, { RoleName: acc.RandomName(), - Actions: []matlas.Action{{ + Actions: &[]admin.DatabasePrivilegeAction{{ Action: "SERVER_STATUS", - Resources: []matlas.Resource{{ - Cluster: conversion.Pointer(true), + Resources: &[]admin.DatabasePermittedNamespaceResource{{ + Cluster: true, }}, }}, }, } - testRole := &matlas.CustomDBRole{ + testRole := &admin.UserCustomDBRole{ RoleName: acc.RandomName(), - Actions: []matlas.Action{{ + Actions: &[]admin.DatabasePrivilegeAction{{ Action: "UPDATE", - Resources: []matlas.Resource{{ - DB: conversion.Pointer(acc.RandomClusterName()), + Resources: &[]admin.DatabasePermittedNamespaceResource{{ + Db: acc.RandomClusterName(), }}, }}, } - inheritRoleUpdated := []matlas.CustomDBRole{ + inheritRoleUpdated := 
[]admin.UserCustomDBRole{ { RoleName: inheritRole[0].RoleName, - Actions: []matlas.Action{{ + Actions: &[]admin.DatabasePrivilegeAction{{ Action: "FIND", - Resources: []matlas.Resource{{ - DB: conversion.Pointer(acc.RandomClusterName()), + Resources: &[]admin.DatabasePermittedNamespaceResource{{ + Db: acc.RandomClusterName(), }}, }}, }, { RoleName: inheritRole[1].RoleName, - Actions: []matlas.Action{{ + Actions: &[]admin.DatabasePrivilegeAction{{ Action: "CONN_POOL_STATS", - Resources: []matlas.Resource{{ - Cluster: conversion.Pointer(true), + Resources: &[]admin.DatabasePermittedNamespaceResource{{ + Cluster: true, }}, }}, }, } - testRoleUpdated := &matlas.CustomDBRole{ + testRoleUpdated := &admin.UserCustomDBRole{ RoleName: testRole.RoleName, - Actions: []matlas.Action{{ + Actions: &[]admin.DatabasePrivilegeAction{{ Action: "REMOVE", - Resources: []matlas.Resource{{ - DB: conversion.Pointer(acc.RandomClusterName()), + Resources: &[]admin.DatabasePermittedNamespaceResource{{ + Db: acc.RandomClusterName(), }}, }}, } @@ -148,25 +148,25 @@ func TestAccConfigRSCustomDBRoles_WithInheritedRoles(t *testing.T) { checkExists(InheritedRoleResourceNameOne), resource.TestCheckResourceAttrSet(InheritedRoleResourceNameOne, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "role_name", inheritRole[0].RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.#", cast.ToString(len(inheritRole[0].Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.action", inheritRole[0].Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.resources.#", cast.ToString(len(inheritRole[0].Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.#", cast.ToString(len(inheritRole[0].GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.action", inheritRole[0].GetActions()[0].Action), + 
resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.resources.#", cast.ToString(len(inheritRole[0].GetActions()[0].GetResources()))), // inherited Role [1] checkExists(InheritedRoleResourceNameTwo), resource.TestCheckResourceAttrSet(InheritedRoleResourceNameTwo, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "role_name", inheritRole[1].RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.#", cast.ToString(len(inheritRole[1].Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.action", inheritRole[1].Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.resources.#", cast.ToString(len(inheritRole[1].Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.#", cast.ToString(len(inheritRole[1].GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.action", inheritRole[1].GetActions()[0].Action), + resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.resources.#", cast.ToString(len(inheritRole[1].GetActions()[0].GetResources()))), // For Test Role checkExists(testRoleResourceName), resource.TestCheckResourceAttrSet(testRoleResourceName, "project_id"), resource.TestCheckResourceAttr(testRoleResourceName, "role_name", testRole.RoleName), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRole.Actions))), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRole.Actions[0].Action), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRole.Actions[0].Resources))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRole.GetActions()))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRole.GetActions()[0].Action), + 
resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRole.GetActions()[0].GetResources()))), resource.TestCheckResourceAttr(testRoleResourceName, "inherited_roles.#", "2"), ), }, @@ -179,25 +179,25 @@ func TestAccConfigRSCustomDBRoles_WithInheritedRoles(t *testing.T) { checkExists(InheritedRoleResourceNameOne), resource.TestCheckResourceAttrSet(InheritedRoleResourceNameOne, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "role_name", inheritRoleUpdated[0].RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.#", cast.ToString(len(inheritRoleUpdated[0].Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.action", inheritRoleUpdated[0].Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated[0].Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.#", cast.ToString(len(inheritRoleUpdated[0].GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.action", inheritRoleUpdated[0].GetActions()[0].Action), + resource.TestCheckResourceAttr(InheritedRoleResourceNameOne, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated[0].GetActions()[0].GetResources()))), // inherited Role [1] checkExists(InheritedRoleResourceNameTwo), resource.TestCheckResourceAttrSet(InheritedRoleResourceNameTwo, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "role_name", inheritRoleUpdated[1].RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.#", cast.ToString(len(inheritRoleUpdated[1].Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.action", inheritRoleUpdated[1].Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.resources.#", 
cast.ToString(len(inheritRoleUpdated[1].Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.#", cast.ToString(len(inheritRoleUpdated[1].GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.action", inheritRoleUpdated[1].GetActions()[0].Action), + resource.TestCheckResourceAttr(InheritedRoleResourceNameTwo, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated[1].GetActions()[0].GetResources()))), // For Test Role checkExists(testRoleResourceName), resource.TestCheckResourceAttrSet(testRoleResourceName, "project_id"), resource.TestCheckResourceAttr(testRoleResourceName, "role_name", testRoleUpdated.RoleName), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRoleUpdated.Actions))), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRoleUpdated.Actions[0].Action), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRoleUpdated.Actions[0].Resources))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRoleUpdated.GetActions()))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRoleUpdated.GetActions()[0].Action), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRoleUpdated.GetActions()[0].GetResources()))), resource.TestCheckResourceAttr(testRoleResourceName, "inherited_roles.#", "2"), ), }, @@ -213,55 +213,55 @@ func TestAccConfigRSCustomDBRoles_MultipleCustomRoles(t *testing.T) { projectName = acc.RandomProjectName() ) - inheritRole := &matlas.CustomDBRole{ + inheritRole := &admin.UserCustomDBRole{ RoleName: acc.RandomName(), - Actions: []matlas.Action{ + Actions: &[]admin.DatabasePrivilegeAction{ { Action: "REMOVE", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: 
conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "FIND", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, }, } - testRole := &matlas.CustomDBRole{ + testRole := &admin.UserCustomDBRole{ RoleName: acc.RandomName(), - Actions: []matlas.Action{ + Actions: &[]admin.DatabasePrivilegeAction{ { Action: "UPDATE", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "INSERT", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, }, - InheritedRoles: []matlas.InheritedRole{ + InheritedRoles: &[]admin.DatabaseInheritedRole{ { Role: inheritRole.RoleName, Db: "admin", @@ -269,55 +269,55 @@ func TestAccConfigRSCustomDBRoles_MultipleCustomRoles(t *testing.T) { }, } - inheritRoleUpdated := &matlas.CustomDBRole{ + inheritRoleUpdated := &admin.UserCustomDBRole{ RoleName: inheritRole.RoleName, - Actions: []matlas.Action{ + Actions: &[]admin.DatabasePrivilegeAction{ { Action: "UPDATE", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "FIND", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "INSERT", - 
Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, }, } - testRoleUpdated := &matlas.CustomDBRole{ + testRoleUpdated := &admin.UserCustomDBRole{ RoleName: testRole.RoleName, - Actions: []matlas.Action{ + Actions: &[]admin.DatabasePrivilegeAction{ { Action: "REMOVE", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, }, - InheritedRoles: []matlas.InheritedRole{ + InheritedRoles: &[]admin.DatabaseInheritedRole{ { Role: inheritRole.RoleName, Db: "admin", @@ -338,17 +338,17 @@ func TestAccConfigRSCustomDBRoles_MultipleCustomRoles(t *testing.T) { checkExists(InheritedRoleResourceName), resource.TestCheckResourceAttrSet(InheritedRoleResourceName, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceName, "role_name", inheritRole.RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRole.Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRole.Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRole.Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRole.GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRole.GetActions()[0].Action), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRole.GetActions()[0].GetResources()))), // For Test Role checkExists(testRoleResourceName), resource.TestCheckResourceAttrSet(testRoleResourceName, "project_id"), 
resource.TestCheckResourceAttr(testRoleResourceName, "role_name", testRole.RoleName), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRole.Actions))), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRole.Actions[0].Action), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRole.Actions[0].Resources))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRole.GetActions()))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRole.GetActions()[0].Action), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRole.GetActions()[0].GetResources()))), ), }, { @@ -359,17 +359,17 @@ func TestAccConfigRSCustomDBRoles_MultipleCustomRoles(t *testing.T) { checkExists(InheritedRoleResourceName), resource.TestCheckResourceAttrSet(InheritedRoleResourceName, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceName, "role_name", inheritRoleUpdated.RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRoleUpdated.Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRoleUpdated.Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated.Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRoleUpdated.GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRoleUpdated.GetActions()[0].Action), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated.GetActions()[0].GetResources()))), // For Test Role checkExists(testRoleResourceName), 
resource.TestCheckResourceAttrSet(testRoleResourceName, "project_id"), resource.TestCheckResourceAttr(testRoleResourceName, "role_name", testRoleUpdated.RoleName), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRoleUpdated.Actions))), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRoleUpdated.Actions[0].Action), - resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRoleUpdated.Actions[0].Resources))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.#", cast.ToString(len(testRoleUpdated.GetActions()))), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.action", testRoleUpdated.GetActions()[0].Action), + resource.TestCheckResourceAttr(testRoleResourceName, "actions.0.resources.#", cast.ToString(len(testRoleUpdated.GetActions()[0].GetResources()))), resource.TestCheckResourceAttr(testRoleResourceName, "inherited_roles.#", "1"), ), }, @@ -416,70 +416,70 @@ func TestAccConfigRSCustomDBRoles_UpdatedInheritRoles(t *testing.T) { projectName = acc.RandomProjectName() ) - inheritRole := &matlas.CustomDBRole{ + inheritRole := &admin.UserCustomDBRole{ RoleName: acc.RandomName(), - Actions: []matlas.Action{ + Actions: &[]admin.DatabasePrivilegeAction{ { Action: "REMOVE", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "FIND", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, }, } - inheritRoleUpdated := &matlas.CustomDBRole{ + inheritRoleUpdated := &admin.UserCustomDBRole{ RoleName: inheritRole.RoleName, - Actions: []matlas.Action{ + Actions: 
&[]admin.DatabasePrivilegeAction{ { Action: "UPDATE", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "FIND", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, { Action: "INSERT", - Resources: []matlas.Resource{ + Resources: &[]admin.DatabasePermittedNamespaceResource{ { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, { - DB: conversion.Pointer(acc.RandomClusterName()), + Db: acc.RandomClusterName(), }, }, }, }, } - testRole := &matlas.CustomDBRole{ + testRole := &admin.UserCustomDBRole{ RoleName: acc.RandomName(), - InheritedRoles: []matlas.InheritedRole{ + InheritedRoles: &[]admin.DatabaseInheritedRole{ { Role: inheritRole.RoleName, Db: "admin", @@ -500,9 +500,9 @@ func TestAccConfigRSCustomDBRoles_UpdatedInheritRoles(t *testing.T) { checkExists(InheritedRoleResourceName), resource.TestCheckResourceAttrSet(InheritedRoleResourceName, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceName, "role_name", inheritRole.RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRole.Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRole.Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRole.Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRole.GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRole.GetActions()[0].Action), + 
resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRole.GetActions()[0].GetResources()))), // For Test Role checkExists(testRoleResourceName), @@ -520,9 +520,9 @@ func TestAccConfigRSCustomDBRoles_UpdatedInheritRoles(t *testing.T) { checkExists(InheritedRoleResourceName), resource.TestCheckResourceAttrSet(InheritedRoleResourceName, "project_id"), resource.TestCheckResourceAttr(InheritedRoleResourceName, "role_name", inheritRoleUpdated.RoleName), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRoleUpdated.Actions))), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRoleUpdated.Actions[0].Action), - resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated.Actions[0].Resources))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.#", cast.ToString(len(inheritRoleUpdated.GetActions()))), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.action", inheritRoleUpdated.GetActions()[0].Action), + resource.TestCheckResourceAttr(InheritedRoleResourceName, "actions.0.resources.#", cast.ToString(len(inheritRoleUpdated.GetActions()[0].GetResources()))), // For Test Role checkExists(testRoleResourceName), @@ -599,7 +599,7 @@ func configBasic(orgID, projectName, roleName, action, databaseName string) stri `, orgID, projectName, roleName, action, databaseName) } -func configWithInheritedRoles(orgID, projectName string, inheritedRole []matlas.CustomDBRole, testRole *matlas.CustomDBRole) string { +func configWithInheritedRoles(orgID, projectName string, inheritedRole []admin.UserCustomDBRole, testRole *admin.UserCustomDBRole) string { return fmt.Sprintf(` resource "mongodbatlas_project" "test" { @@ -654,30 +654,30 @@ func configWithInheritedRoles(orgID, projectName string, inheritedRole []matlas. 
} } `, orgID, projectName, - inheritedRole[0].RoleName, inheritedRole[0].Actions[0].Action, *inheritedRole[0].Actions[0].Resources[0].DB, - inheritedRole[1].RoleName, inheritedRole[1].Actions[0].Action, *inheritedRole[1].Actions[0].Resources[0].Cluster, - testRole.RoleName, testRole.Actions[0].Action, *testRole.Actions[0].Resources[0].DB, + inheritedRole[0].RoleName, inheritedRole[0].GetActions()[0].Action, inheritedRole[0].GetActions()[0].GetResources()[0].Db, + inheritedRole[1].RoleName, inheritedRole[1].GetActions()[0].Action, inheritedRole[1].GetActions()[0].GetResources()[0].Cluster, + testRole.RoleName, testRole.GetActions()[0].Action, testRole.GetActions()[0].GetResources()[0].Db, ) } -func configWithMultiple(orgID, projectName string, inheritedRole, testRole *matlas.CustomDBRole) string { - getCustomRoleFields := func(customRole *matlas.CustomDBRole) map[string]string { +func configWithMultiple(orgID, projectName string, inheritedRole, testRole *admin.UserCustomDBRole) string { + getCustomRoleFields := func(customRole *admin.UserCustomDBRole) map[string]string { var ( actions string inheritedRoles string ) - for _, a := range customRole.Actions { + for _, a := range customRole.GetActions() { var resources string // get the resources - for _, r := range a.Resources { + for _, r := range a.GetResources() { resources += fmt.Sprintf(` resources { collection_name = "" database_name = "%s" } - `, *r.DB) + `, r.Db) } // get the actions and set the resources @@ -689,7 +689,7 @@ func configWithMultiple(orgID, projectName string, inheritedRole, testRole *matl `, a.Action, resources) } - for _, in := range customRole.InheritedRoles { + for _, in := range customRole.GetInheritedRoles() { inheritedRoles += fmt.Sprintf(` inherited_roles { role_name = "%s" From 85d08fc6fc0ad22039ee33338c63bdada7afe189 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Fri, 19 Jul 2024 09:26:14 +0000 Subject: [PATCH 61/84] chore: Updates CHANGELOG.md for #2436 --- CHANGELOG.md | 6 ++++++ 1 
file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f76d6703ac..cb2e5dac48 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## (Unreleased) +NOTES: + +* data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) +* data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) +* resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) + ## 1.17.4 (July 19, 2024) ENHANCEMENTS: From 9831293e74ba8331d8f943afe88e92b47102e783 Mon Sep 17 00:00:00 2001 From: Oriol Date: Tue, 23 Jul 2024 10:27:15 +0200 Subject: [PATCH 62/84] chore: Clean up usages of old SDK (#2449) * remove usages of old SDK * add az2 to vpc endpoint * Revert "add az2 to vpc endpoint" This reverts commit ce6f7cc09d4d31292479cc58dd3c5d9e92dd7738. 
* skip flaky test --- .../alertconfiguration/resource_alert_configuration.go | 4 ++-- .../resource_cloud_backup_snapshot_export_bucket.go | 4 ++-- .../resource_cloud_backup_snapshot_export_job.go | 4 ++-- .../resource_cloud_backup_snapshot_export_job_test.go | 2 +- internal/service/onlinearchive/resource_online_archive.go | 2 -- .../resource_private_endpoint_regional_mode_test.go | 5 +++-- .../resource_third_party_integration_test.go | 4 ++-- 7 files changed, 12 insertions(+), 13 deletions(-) diff --git a/internal/service/alertconfiguration/resource_alert_configuration.go b/internal/service/alertconfiguration/resource_alert_configuration.go index 29b72498f1..9840cacb37 100644 --- a/internal/service/alertconfiguration/resource_alert_configuration.go +++ b/internal/service/alertconfiguration/resource_alert_configuration.go @@ -520,7 +520,7 @@ func (r *alertConfigurationRS) Update(ctx context.Context, req resource.UpdateRe } func (r *alertConfigurationRS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - conn := r.Client.Atlas + connV2 := r.Client.AtlasV2 var alertConfigState TfAlertConfigurationRSModel resp.Diagnostics.Append(req.State.Get(ctx, &alertConfigState)...) 
@@ -530,7 +530,7 @@ func (r *alertConfigurationRS) Delete(ctx context.Context, req resource.DeleteRe ids := conversion.DecodeStateID(alertConfigState.ID.ValueString()) - _, err := conn.AlertConfigurations.Delete(ctx, ids[EncodedIDKeyProjectID], ids[EncodedIDKeyAlertID]) + _, err := connV2.AlertConfigurationsApi.DeleteAlertConfiguration(ctx, ids[EncodedIDKeyProjectID], ids[EncodedIDKeyAlertID]).Execute() if err != nil { resp.Diagnostics.AddError(errorReadAlertConf, err.Error()) } diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go index 0da3e4a58f..9d704b4bd0 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go @@ -162,14 +162,14 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - conn := meta.(*config.MongoDBClient).Atlas + conn := meta.(*config.MongoDBClient).AtlasV2 projectID, id, err := splitImportID(d.Id()) if err != nil { return nil, err } - _, _, err = conn.CloudProviderSnapshotExportBuckets.Get(ctx, *projectID, *id) + _, _, err = conn.CloudBackupsApi.GetExportBucket(ctx, *projectID, *id).Execute() if err != nil { return nil, fmt.Errorf("couldn't import snapshot export bucket %s in project %s, error: %s", *id, *projectID, err) } diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go index 246d536df8..543f13cd7f 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go @@ -290,7 +290,7 @@ func expandExportJobCustomData(d *schema.ResourceData) *[]admin.BackupLabel { } func resourceImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - conn := meta.(*config.MongoDBClient).Atlas + connV2 := meta.(*config.MongoDBClient).AtlasV2 parts := strings.SplitN(d.Id(), "--", 3) if len(parts) != 3 { @@ -301,7 +301,7 @@ func resourceImportState(ctx context.Context, d *schema.ResourceData, meta any) clusterName := parts[1] exportID := parts[2] - _, _, err := conn.CloudProviderSnapshotExportJobs.Get(ctx, projectID, clusterName, exportID) + _, _, err := connV2.CloudBackupsApi.GetBackupExportJob(ctx, projectID, clusterName, exportID).Execute() if err != nil { return nil, fmt.Errorf("couldn't import snapshot export job %s in project %s and cluster %s, error: %s", exportID, projectID, clusterName, err) } diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go 
b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go index 721b12c7c3..99125326f6 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go @@ -82,7 +82,7 @@ func checkExists(resourceName string) resource.TestCheckFunc { if err != nil { return err } - _, _, err = acc.Conn().CloudProviderSnapshotExportJobs.Get(context.Background(), projectID, clusterName, exportJobID) + _, _, err = acc.ConnV2().CloudBackupsApi.GetBackupExportJob(context.Background(), projectID, clusterName, exportJobID).Execute() if err == nil { return nil } diff --git a/internal/service/onlinearchive/resource_online_archive.go b/internal/service/onlinearchive/resource_online_archive.go index 5f9b17b12b..1e93c832d8 100644 --- a/internal/service/onlinearchive/resource_online_archive.go +++ b/internal/service/onlinearchive/resource_online_archive.go @@ -633,8 +633,6 @@ func mapCriteria(d *schema.ResourceData) admin.Criteria { } func mapSchedule(d *schema.ResourceData) *admin.OnlineArchiveSchedule { - // scheduleInput := &matlas.OnlineArchiveSchedule{ - // We have to provide schedule.type="DEFAULT" when the schedule block is not provided or removed scheduleInput := &admin.OnlineArchiveSchedule{ Type: scheduleTypeDefault, diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go index 93be48622b..27c490da81 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go @@ -17,6 +17,7 @@ func TestAccPrivateEndpointRegionalMode_basic(t *testing.T) { } func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { + 
acc.SkipTestForCI(t) // needs AWS configuration var ( endpointResourceSuffix = "atlasple" resourceSuffix = "atlasrm" @@ -168,7 +169,7 @@ func checkExists(resourceName string) resource.TestCheckFunc { return fmt.Errorf("no ID is set") } projectID := rs.Primary.ID - _, _, err := acc.Conn().PrivateEndpoints.GetRegionalizedPrivateEndpointSetting(context.Background(), projectID) + _, _, err := acc.ConnV2().PrivateEndpointServicesApi.GetRegionalizedPrivateEndpointSetting(context.Background(), projectID).Execute() if err == nil { return nil } @@ -181,7 +182,7 @@ func checkDestroy(s *terraform.State) error { if rs.Type != "mongodbatlas_private_endpoint_regional_mode" { continue } - setting, _, _ := acc.Conn().PrivateEndpoints.GetRegionalizedPrivateEndpointSetting(context.Background(), rs.Primary.ID) + setting, _, _ := acc.ConnV2().PrivateEndpointServicesApi.GetRegionalizedPrivateEndpointSetting(context.Background(), rs.Primary.ID).Execute() if setting != nil && setting.Enabled != false { return fmt.Errorf("Regionalized private endpoint setting for project %q was not properly disabled", rs.Primary.ID) } diff --git a/internal/service/thirdpartyintegration/resource_third_party_integration_test.go b/internal/service/thirdpartyintegration/resource_third_party_integration_test.go index a4f9f78105..c73f0f534c 100644 --- a/internal/service/thirdpartyintegration/resource_third_party_integration_test.go +++ b/internal/service/thirdpartyintegration/resource_third_party_integration_test.go @@ -342,7 +342,7 @@ func checkDestroy(s *terraform.State) error { if attrs["type"] == "" { return fmt.Errorf("no type is set") } - _, _, err := acc.Conn().Integrations.Get(context.Background(), attrs["project_id"], attrs["type"]) + _, _, err := acc.ConnV2().ThirdPartyIntegrationsApi.GetThirdPartyIntegration(context.Background(), attrs["project_id"], attrs["type"]).Execute() if err == nil { return fmt.Errorf("third party integration service (%s) still exists", attrs["type"]) } @@ -496,7 +496,7 @@ 
func checkExists(resourceName string) resource.TestCheckFunc { if attrs["type"] == "" { return fmt.Errorf("no type is set") } - if _, _, err := acc.Conn().Integrations.Get(context.Background(), attrs["project_id"], attrs["type"]); err == nil { + if _, _, err := acc.ConnV2().ThirdPartyIntegrationsApi.GetThirdPartyIntegration(context.Background(), attrs["project_id"], attrs["type"]).Execute(); err == nil { return nil } return fmt.Errorf("third party integration (%s) does not exist", attrs["project_id"]) From 12433b7fa592db58e577474032448a9d471b760e Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Tue, 23 Jul 2024 12:38:41 +0200 Subject: [PATCH 63/84] allow 0 (#2456) --- internal/service/advancedcluster/resource_advanced_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 2bb01aac6e..08165d1412 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -420,8 +420,8 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. // Validate oplog_size_mb to show the error before the cluster is created. 
if oplogSizeMB, ok := d.GetOkExists("advanced_configuration.0.oplog_size_mb"); ok { - if cast.ToInt64(oplogSizeMB) <= 0 { - return diag.FromErr(fmt.Errorf("`advanced_configuration.oplog_size_mb` cannot be <= 0")) + if cast.ToInt64(oplogSizeMB) < 0 { + return diag.FromErr(fmt.Errorf("`advanced_configuration.oplog_size_mb` cannot be < 0")) } } From 80dcc2d7c7fcea1a715cf0a25059800919ad6734 Mon Sep 17 00:00:00 2001 From: Oriol Date: Mon, 29 Jul 2024 09:29:21 +0200 Subject: [PATCH 64/84] fix: Fixes creation of organization (#2462) * fix TerraformVersion interface conversion * refactor organization resource * add changelog entry * PR comment --- .changelog/2462.txt | 3 ++ .../organization/data_source_organization.go | 5 ++- .../data_source_organization_test.go | 4 +-- .../organization/data_source_organizations.go | 5 ++- .../data_source_organizations_test.go | 8 ++--- .../organization/resource_organization.go | 28 +++++++-------- .../resource_organization_migration_test.go | 4 +-- .../resource_organization_test.go | 36 +++++++++---------- 8 files changed, 47 insertions(+), 46 deletions(-) create mode 100644 .changelog/2462.txt diff --git a/.changelog/2462.txt b/.changelog/2462.txt new file mode 100644 index 0000000000..7adb409197 --- /dev/null +++ b/.changelog/2462.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_organization: Fixes a bug in organization resource creation where the provider crashed. 
+``` diff --git a/internal/service/organization/data_source_organization.go b/internal/service/organization/data_source_organization.go index 6f08f3791c..9ff52aa828 100644 --- a/internal/service/organization/data_source_organization.go +++ b/internal/service/organization/data_source_organization.go @@ -13,7 +13,7 @@ import ( func DataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasOrganizationRead, + ReadContext: dataSourceRead, Schema: map[string]*schema.Schema{ "org_id": { Type: schema.TypeString, @@ -59,8 +59,7 @@ func DataSource() *schema.Resource { } } -func dataSourceMongoDBAtlasOrganizationRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. +func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).AtlasV2 orgID := d.Get("org_id").(string) diff --git a/internal/service/organization/data_source_organization_test.go b/internal/service/organization/data_source_organization_test.go index 482b915cb3..e7926e8c26 100644 --- a/internal/service/organization/data_source_organization_test.go +++ b/internal/service/organization/data_source_organization_test.go @@ -19,7 +19,7 @@ func TestAccConfigDSOrganization_basic(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: testAccMongoDBAtlasOrganizationConfigWithDS(orgID), + Config: configWithDS(orgID), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(datasourceName, "name"), resource.TestCheckResourceAttrSet(datasourceName, "id"), @@ -31,7 +31,7 @@ func TestAccConfigDSOrganization_basic(t *testing.T) { }, }) } -func testAccMongoDBAtlasOrganizationConfigWithDS(orgID string) string { +func configWithDS(orgID string) string { config := fmt.Sprintf(` data "mongodbatlas_organization" "test" { diff --git a/internal/service/organization/data_source_organizations.go 
b/internal/service/organization/data_source_organizations.go index b1d209ef46..9ba3c1eabe 100644 --- a/internal/service/organization/data_source_organizations.go +++ b/internal/service/organization/data_source_organizations.go @@ -16,7 +16,7 @@ import ( func PluralDataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasOrganizationsRead, + ReadContext: pluralDataSourceRead, Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -86,8 +86,7 @@ func PluralDataSource() *schema.Resource { } } -func dataSourceMongoDBAtlasOrganizationsRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. +func pluralDataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).AtlasV2 organizationOptions := &admin.ListOrganizationsApiParams{ diff --git a/internal/service/organization/data_source_organizations_test.go b/internal/service/organization/data_source_organizations_test.go index 5cd9e3a23a..9894031f63 100644 --- a/internal/service/organization/data_source_organizations_test.go +++ b/internal/service/organization/data_source_organizations_test.go @@ -17,7 +17,7 @@ func TestAccConfigDSOrganizations_basic(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: testAccMongoDBAtlasOrganizationsConfigWithDS(), + Config: configWithPluralDS(), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(datasourceName, "results.#"), resource.TestCheckResourceAttrSet(datasourceName, "results.0.name"), @@ -39,7 +39,7 @@ func TestAccConfigDSOrganizations_withPagination(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: testAccMongoDBAtlasOrganizationsConfigWithPagination(2, 5), + Config: configWithPagination(2, 5), Check: resource.ComposeAggregateTestCheckFunc( 
resource.TestCheckResourceAttrSet(datasourceName, "results.#"), ), @@ -48,14 +48,14 @@ func TestAccConfigDSOrganizations_withPagination(t *testing.T) { }) } -func testAccMongoDBAtlasOrganizationsConfigWithDS() string { +func configWithPluralDS() string { return ` data "mongodbatlas_organizations" "test" { } ` } -func testAccMongoDBAtlasOrganizationsConfigWithPagination(pageNum, itemPage int) string { +func configWithPagination(pageNum, itemPage int) string { return fmt.Sprintf(` data "mongodbatlas_organizations" "test" { page_num = %d diff --git a/internal/service/organization/resource_organization.go b/internal/service/organization/resource_organization.go index 6a7c38fc34..feaa210241 100644 --- a/internal/service/organization/resource_organization.go +++ b/internal/service/organization/resource_organization.go @@ -18,10 +18,10 @@ import ( func Resource() *schema.Resource { return &schema.Resource{ - CreateContext: resourceMongoDBAtlasOrganizationCreate, - ReadContext: resourceMongoDBAtlasOrganizationRead, - UpdateContext: resourceMongoDBAtlasOrganizationUpdate, - DeleteContext: resourceMongoDBAtlasOrganizationDelete, + CreateContext: resourceCreate, + ReadContext: resourceRead, + UpdateContext: resourceUpdate, + DeleteContext: resourceDelete, Importer: nil, // import is not supported. 
See CLOUDP-215155 Schema: map[string]*schema.Schema{ "org_owner_id": { @@ -80,7 +80,7 @@ func Resource() *schema.Resource { } } -func resourceMongoDBAtlasOrganizationCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { if err := ValidateAPIKeyIsOrgOwner(conversion.ExpandStringList(d.Get("role_names").(*schema.Set).List())); err != nil { return diag.FromErr(err) } @@ -104,7 +104,7 @@ func resourceMongoDBAtlasOrganizationCreate(ctx context.Context, d *schema.Resou PublicKey: *organization.ApiKey.PublicKey, PrivateKey: *organization.ApiKey.PrivateKey, BaseURL: meta.(*config.MongoDBClient).Config.BaseURL, - TerraformVersion: meta.(*config.Config).TerraformVersion, + TerraformVersion: meta.(*config.MongoDBClient).Config.TerraformVersion, } clients, _ := cfg.NewClient(ctx) @@ -136,16 +136,16 @@ func resourceMongoDBAtlasOrganizationCreate(ctx context.Context, d *schema.Resou "org_id": organization.Organization.GetId(), })) - return resourceMongoDBAtlasOrganizationRead(ctx, d, meta) + return resourceRead(ctx, d, meta) } -func resourceMongoDBAtlasOrganizationRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { // Get client connection. 
cfg := config.Config{ PublicKey: d.Get("public_key").(string), PrivateKey: d.Get("private_key").(string), BaseURL: meta.(*config.MongoDBClient).Config.BaseURL, - TerraformVersion: meta.(*config.Config).TerraformVersion, + TerraformVersion: meta.(*config.MongoDBClient).Config.TerraformVersion, } clients, _ := cfg.NewClient(ctx) @@ -189,13 +189,13 @@ func resourceMongoDBAtlasOrganizationRead(ctx context.Context, d *schema.Resourc return nil } -func resourceMongoDBAtlasOrganizationUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { // Get client connection. cfg := config.Config{ PublicKey: d.Get("public_key").(string), PrivateKey: d.Get("private_key").(string), BaseURL: meta.(*config.MongoDBClient).Config.BaseURL, - TerraformVersion: meta.(*config.Config).TerraformVersion, + TerraformVersion: meta.(*config.MongoDBClient).Config.TerraformVersion, } clients, _ := cfg.NewClient(ctx) @@ -218,16 +218,16 @@ func resourceMongoDBAtlasOrganizationUpdate(ctx context.Context, d *schema.Resou } } - return resourceMongoDBAtlasOrganizationRead(ctx, d, meta) + return resourceRead(ctx, d, meta) } -func resourceMongoDBAtlasOrganizationDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { // Get client connection. 
cfg := config.Config{ PublicKey: d.Get("public_key").(string), PrivateKey: d.Get("private_key").(string), BaseURL: meta.(*config.MongoDBClient).Config.BaseURL, - TerraformVersion: meta.(*config.Config).TerraformVersion, + TerraformVersion: meta.(*config.MongoDBClient).Config.TerraformVersion, } clients, _ := cfg.NewClient(ctx) diff --git a/internal/service/organization/resource_organization_migration_test.go b/internal/service/organization/resource_organization_migration_test.go index 469b3311be..03c771fadf 100644 --- a/internal/service/organization/resource_organization_migration_test.go +++ b/internal/service/organization/resource_organization_migration_test.go @@ -26,7 +26,7 @@ func TestMigConfigRSOrganization_Basic(t *testing.T) { Steps: []resource.TestStep{ { ExternalProviders: mig.ExternalProviders(), - Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleName), + Config: configBasic(orgOwnerID, name, description, roleName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttrSet(resourceName, "description"), @@ -35,7 +35,7 @@ func TestMigConfigRSOrganization_Basic(t *testing.T) { }, { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleName), + Config: configBasic(orgOwnerID, name, description, roleName), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ acc.DebugPlan(), diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index 22095111ab..20546ff96f 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -33,12 +33,12 @@ func TestAccConfigRSOrganization_Basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acc.PreCheck(t) }, 
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - CheckDestroy: testAccCheckMongoDBAtlasOrganizationDestroy, + CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleName), + Config: configBasic(orgOwnerID, name, description, roleName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckMongoDBAtlasOrganizationExists(resourceName), + checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "api_access_list_required", "false"), @@ -47,9 +47,9 @@ func TestAccConfigRSOrganization_Basic(t *testing.T) { ), }, { - Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, updatedName, description, roleName), + Config: configBasic(orgOwnerID, updatedName, description, roleName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckMongoDBAtlasOrganizationExists(resourceName), + checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "api_access_list_required", "false"), @@ -74,10 +74,10 @@ func TestAccConfigRSOrganization_BasicAccess(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { acc.PreCheck(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - CheckDestroy: testAccCheckMongoDBAtlasOrganizationDestroy, + CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleName), + Config: configBasic(orgOwnerID, name, description, roleName), ExpectError: regexp.MustCompile("API Key must have the ORG_OWNER role"), }, }, @@ -106,12 +106,12 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: 
func() { acc.PreCheck(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - CheckDestroy: testAccCheckMongoDBAtlasOrganizationDestroy, + CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: testAccMongoDBAtlasOrganizationConfigWithSettings(orgOwnerID, name, description, roleName, settingsConfig), + Config: configWithSettings(orgOwnerID, name, description, roleName, settingsConfig), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckMongoDBAtlasOrganizationExists(resourceName), + checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "api_access_list_required", "false"), @@ -120,9 +120,9 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { ), }, { - Config: testAccMongoDBAtlasOrganizationConfigWithSettings(orgOwnerID, name, description, roleName, settingsConfigUpdated), + Config: configWithSettings(orgOwnerID, name, description, roleName, settingsConfigUpdated), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckMongoDBAtlasOrganizationExists(resourceName), + checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "api_access_list_required", "false"), @@ -131,9 +131,9 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { ), }, { - Config: testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, "org-name-updated", description, roleName), + Config: configBasic(orgOwnerID, "org-name-updated", description, roleName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckMongoDBAtlasOrganizationExists(resourceName), + checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "org_id"), resource.TestCheckResourceAttrSet(resourceName, "description"), resource.TestCheckResourceAttr(resourceName, 
"description", description), @@ -143,7 +143,7 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { }) } -func testAccCheckMongoDBAtlasOrganizationExists(resourceName string) resource.TestCheckFunc { +func checkExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -174,7 +174,7 @@ func testAccCheckMongoDBAtlasOrganizationExists(resourceName string) resource.Te } } -func testAccCheckMongoDBAtlasOrganizationDestroy(s *terraform.State) error { +func checkDestroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "mongodbatlas_organization" { continue @@ -200,7 +200,7 @@ func testAccCheckMongoDBAtlasOrganizationDestroy(s *terraform.State) error { return nil } -func testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, roleNames string) string { +func configBasic(orgOwnerID, name, description, roleNames string) string { return fmt.Sprintf(` resource "mongodbatlas_organization" "test" { org_owner_id = "%s" @@ -211,7 +211,7 @@ func testAccMongoDBAtlasOrganizationConfigBasic(orgOwnerID, name, description, r `, orgOwnerID, name, description, roleNames) } -func testAccMongoDBAtlasOrganizationConfigWithSettings(orgOwnerID, name, description, roleNames, settingsConfig string) string { +func configWithSettings(orgOwnerID, name, description, roleNames, settingsConfig string) string { return fmt.Sprintf(` resource "mongodbatlas_organization" "test" { org_owner_id = "%s" From 7d3573974be666b2dffb8b68f6e29f5ca75b2a74 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Mon, 29 Jul 2024 07:31:14 +0000 Subject: [PATCH 65/84] chore: Updates CHANGELOG.md for #2462 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb2e5dac48..50e98d0ba8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,10 @@ NOTES: * data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: 
Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) * resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) +BUG FIXES: + +* resource/mongodbatlas_organization: Fixes a bug in organization resource creation where the provider crashed. ([#2462](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2462)) + ## 1.17.4 (July 19, 2024) ENHANCEMENTS: From 7fdbae1202698506cefb67280d245d737266b7cc Mon Sep 17 00:00:00 2001 From: Oriol Date: Tue, 30 Jul 2024 15:39:35 +0200 Subject: [PATCH 66/84] fix: Fixes nil pointer dereference in `mongodbatlas_alert_configuration` (#2463) * fix nil pointer dereference * avoid nil pointer dereference in metric_threshold_config * changelog entry * changelog suggestion * Update .changelog/2463.txt Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> * remove periods at the end of changelog entries to make it consistent --------- Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> --- .changelog/2436.txt | 6 ++-- .changelog/2462.txt | 2 +- .changelog/2463.txt | 3 ++ .../model_alert_configuration.go | 28 +++++++++---------- 4 files changed, 21 insertions(+), 18 deletions(-) create mode 100644 .changelog/2463.txt diff --git a/.changelog/2436.txt b/.changelog/2436.txt index ee4fe558d3..347e6ddb71 100644 --- a/.changelog/2436.txt +++ b/.changelog/2436.txt @@ -1,11 +1,11 @@ ```release-note:note -resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. +resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute ``` ```release-note:note -data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. 
+data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute ``` ```release-note:note -data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: Deprecates the `err_msg` attribute. +data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: Deprecates the `err_msg` attribute ``` diff --git a/.changelog/2462.txt b/.changelog/2462.txt index 7adb409197..588a6e8d3b 100644 --- a/.changelog/2462.txt +++ b/.changelog/2462.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/mongodbatlas_organization: Fixes a bug in organization resource creation where the provider crashed. +resource/mongodbatlas_organization: Fixes a bug in organization resource creation where the provider crashed ``` diff --git a/.changelog/2463.txt b/.changelog/2463.txt new file mode 100644 index 0000000000..9c4edff18e --- /dev/null +++ b/.changelog/2463.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_alert_configuration: Fixes an issue where the `terraform apply` command crashes if you attempt to edit an existing `mongodbatlas_alert_configuration` by adding a value to `threshold_config` +``` diff --git a/internal/service/alertconfiguration/model_alert_configuration.go b/internal/service/alertconfiguration/model_alert_configuration.go index ef42960051..ab2b50c2ba 100644 --- a/internal/service/alertconfiguration/model_alert_configuration.go +++ b/internal/service/alertconfiguration/model_alert_configuration.go @@ -200,28 +200,28 @@ func NewTFMetricThresholdConfigModel(t *admin.ServerlessMetricThreshold, currSta return []TfMetricThresholdConfigModel{ { MetricName: conversion.StringNullIfEmpty(t.MetricName), - Operator: conversion.StringNullIfEmpty(*t.Operator), - Threshold: types.Float64Value(*t.Threshold), - Units: conversion.StringNullIfEmpty(*t.Units), - Mode: conversion.StringNullIfEmpty(*t.Mode), + Operator: conversion.StringNullIfEmpty(t.GetOperator()), + Threshold: types.Float64Value(t.GetThreshold()), + Units: 
conversion.StringNullIfEmpty(t.GetUnits()), + Mode: conversion.StringNullIfEmpty(t.GetMode()), }, } } currState := currStateSlice[0] newState := TfMetricThresholdConfigModel{ - Threshold: types.Float64Value(*t.Threshold), + Threshold: types.Float64Value(t.GetThreshold()), } if !currState.MetricName.IsNull() { newState.MetricName = conversion.StringNullIfEmpty(t.MetricName) } if !currState.Operator.IsNull() { - newState.Operator = conversion.StringNullIfEmpty(*t.Operator) + newState.Operator = conversion.StringNullIfEmpty(t.GetOperator()) } if !currState.Units.IsNull() { - newState.Units = conversion.StringNullIfEmpty(*t.Units) + newState.Units = conversion.StringNullIfEmpty(t.GetUnits()) } if !currState.Mode.IsNull() { - newState.Mode = conversion.StringNullIfEmpty(*t.Mode) + newState.Mode = conversion.StringNullIfEmpty(t.GetMode()) } return []TfMetricThresholdConfigModel{newState} } @@ -234,21 +234,21 @@ func NewTFThresholdConfigModel(t *admin.GreaterThanRawThreshold, currStateSlice if len(currStateSlice) == 0 { // threshold was created elsewhere from terraform, or import statement is being called return []TfThresholdConfigModel{ { - Operator: conversion.StringNullIfEmpty(*t.Operator), - Threshold: types.Float64Value(float64(*t.Threshold)), // int in new SDK but keeping float64 for backward compatibility - Units: conversion.StringNullIfEmpty(*t.Units), + Operator: conversion.StringNullIfEmpty(t.GetOperator()), + Threshold: types.Float64Value(float64(t.GetThreshold())), // int in new SDK but keeping float64 for backward compatibility + Units: conversion.StringNullIfEmpty(t.GetUnits()), }, } } currState := currStateSlice[0] newState := TfThresholdConfigModel{} if !currState.Operator.IsNull() { - newState.Operator = conversion.StringNullIfEmpty(*t.Operator) + newState.Operator = conversion.StringNullIfEmpty(t.GetOperator()) } if !currState.Units.IsNull() { - newState.Units = conversion.StringNullIfEmpty(*t.Units) + newState.Units = 
conversion.StringNullIfEmpty(t.GetUnits()) } - newState.Threshold = types.Float64Value(float64(*t.Threshold)) + newState.Threshold = types.Float64Value(float64(t.GetThreshold())) return []TfThresholdConfigModel{newState} } From 8fdf420fedd7d6d87c2f3082404536c71fd40cb8 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Tue, 30 Jul 2024 13:41:32 +0000 Subject: [PATCH 67/84] chore: Updates CHANGELOG.md for #2463 --- CHANGELOG.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50e98d0ba8..fcf2b769db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,13 +2,14 @@ NOTES: -* data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) -* data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) -* resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute. ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) +* data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) +* data-source/mongodbatlas_cloud_backup_snapshot_export_jobs: Deprecates the `err_msg` attribute ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) +* resource/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) BUG FIXES: -* resource/mongodbatlas_organization: Fixes a bug in organization resource creation where the provider crashed. 
([#2462](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2462)) +* resource/mongodbatlas_alert_configuration: Fixes an issue where the `terraform apply` command crashes if you attempt to edit an existing `mongodbatlas_alert_configuration` by adding a value to `threshold_config` ([#2463](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2463)) +* resource/mongodbatlas_organization: Fixes a bug in organization resource creation where the provider crashed ([#2462](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2462)) ## 1.17.4 (July 19, 2024) From b6dadfb173a5b530310211ae747ac1634ac78880 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Tue, 30 Jul 2024 13:45:57 +0000 Subject: [PATCH 68/84] chore: Updates examples link in index.md for v1.17.5 release --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 36ddadaf0c..75297df245 100644 --- a/docs/index.md +++ b/docs/index.md @@ -219,7 +219,7 @@ We ship binaries but do not prioritize fixes for the following operating system ## Examples from MongoDB and the Community -We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.4/examples) +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.5/examples) in our GitHub repo that will help both beginner and more advanced users. Have a good example you've created and want to share? 
From 2fe6306c158158bb77635c6acdf382062701404d Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Tue, 30 Jul 2024 13:46:18 +0000 Subject: [PATCH 69/84] chore: Updates CHANGELOG.md header for v1.17.5 release --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fcf2b769db..74ce0796ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ ## (Unreleased) +## 1.17.5 (July 30, 2024) + NOTES: * data-source/mongodbatlas_cloud_backup_snapshot_export_job: Deprecates the `err_msg` attribute ([#2436](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2436)) From 8487a3d6d5daf09dc367e48949ef7e18c1b3e456 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 08:25:41 +0200 Subject: [PATCH 70/84] chore: Bump golangci/golangci-lint-action from 6.0.1 to 6.1.0 (#2469) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.0.1 to 6.1.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/a4f60bb28d35aeee14e6880718e0c85ff1882e64...aaa42aa0628b4ae2578232a66b541047968fac86) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/code-health.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index 0be3c9f1f1..a6930734e1 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -47,7 +47,7 @@ jobs: go-version-file: 'go.mod' cache: false # see https://github.com/golangci/golangci-lint-action/issues/807 - name: golangci-lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 with: version: v1.59.1 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version - name: actionlint From 94371a9114a09c24436123254a527c89196e5d40 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 Jul 2024 08:26:02 +0200 Subject: [PATCH 71/84] chore: Bump github.com/aws/aws-sdk-go from 1.54.19 to 1.55.5 (#2468) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.54.19 to 1.55.5. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.54.19...v1.55.5) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8172cf6b8b..2aed9486b3 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 - github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go v1.55.5 github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-version v1.7.0 diff --git a/go.sum b/go.sum index dae41f7c4e..06c8ddeaea 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= From 99e9212579ee8fca9d40ff2d4344eb57ef8ed82f Mon Sep 17 00:00:00 2001 From: Oriol Date: Wed, 7 Aug 2024 09:30:13 +0200 Subject: [PATCH 72/84] fix: Handles update of `mongodbatlas_backup_compliance_policy` as a create operation (#2480) * 
handle update as a create * add test to make sure no plan changes appear when reapplying config with non default values * add changelog * fix projectId * fix name of resource in test * Update .changelog/2480.txt Co-authored-by: kyuan-mongodb <78768401+kyuan-mongodb@users.noreply.github.com> --------- Co-authored-by: kyuan-mongodb <78768401+kyuan-mongodb@users.noreply.github.com> --- .changelog/2480.txt | 3 + .../resource_backup_compliance_policy.go | 253 ++++++------------ .../resource_backup_compliance_policy_test.go | 81 ++++++ 3 files changed, 169 insertions(+), 168 deletions(-) create mode 100644 .changelog/2480.txt diff --git a/.changelog/2480.txt b/.changelog/2480.txt new file mode 100644 index 0000000000..9474013e4e --- /dev/null +++ b/.changelog/2480.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_backup_compliance_policy: Fixes an issue where the update operation modified attributes that were not supposed to be modified" +``` diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go index b542c87056..8e5a4d2986 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go @@ -263,85 +263,8 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) - dataProtectionSettings := &admin.DataProtectionSettings20231001{ - ProjectId: conversion.StringPtr(projectID), - AuthorizedEmail: d.Get("authorized_email").(string), - AuthorizedUserFirstName: d.Get("authorized_user_first_name").(string), - AuthorizedUserLastName: d.Get("authorized_user_last_name").(string), - CopyProtectionEnabled: conversion.Pointer(d.Get("copy_protection_enabled").(bool)), - EncryptionAtRestEnabled: conversion.Pointer(d.Get("encryption_at_rest_enabled").(bool)), - PitEnabled: conversion.Pointer(d.Get("pit_enabled").(bool)), - RestoreWindowDays: conversion.Pointer(cast.ToInt(d.Get("restore_window_days"))), - OnDemandPolicyItem: expandDemandBackupPolicyItem(d), - } + err := updateOrCreateDataProtectionSetting(ctx, d, connV2, projectID) - var backupPoliciesItem []admin.BackupComplianceScheduledPolicyItem - if v, ok := d.GetOk("policy_item_hourly"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Hourly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - if v, ok := d.GetOk("policy_item_daily"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Daily, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - if v, ok := d.GetOk("policy_item_weekly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: 
cloudbackupschedule.Weekly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - } - if v, ok := d.GetOk("policy_item_monthly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Monthly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - } - if v, ok := d.GetOk("policy_item_yearly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Yearly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - } - if len(backupPoliciesItem) > 0 { - dataProtectionSettings.ScheduledPolicyItems = &backupPoliciesItem - } - - params := admin.UpdateDataProtectionSettingsApiParams{ - GroupId: projectID, - DataProtectionSettings20231001: dataProtectionSettings, - OverwriteBackupPolicies: conversion.Pointer(false), - } - _, _, err := connV2.CloudBackupsApi.UpdateDataProtectionSettingsWithParams(ctx, ¶ms).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorBackupPolicyUpdate, projectID, err)) } @@ -444,97 +367,8 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] - dataProtectionSettings := &admin.DataProtectionSettings20231001{ - ProjectId: conversion.StringPtr(projectID), - AuthorizedEmail: d.Get("authorized_email").(string), - AuthorizedUserFirstName: d.Get("authorized_user_first_name").(string), - AuthorizedUserLastName: d.Get("authorized_user_last_name").(string), - OnDemandPolicyItem: expandDemandBackupPolicyItem(d), - } - - if d.HasChange("copy_protection_enabled") { - dataProtectionSettings.CopyProtectionEnabled = conversion.Pointer(d.Get("copy_protection_enabled").(bool)) - } - - if d.HasChange("encryption_at_rest_enabled") { - dataProtectionSettings.EncryptionAtRestEnabled = conversion.Pointer(d.Get("encryption_at_rest_enabled").(bool)) - } - - if d.HasChange("pit_enabled") { - dataProtectionSettings.PitEnabled = conversion.Pointer(d.Get("pit_enabled").(bool)) - } - - if d.HasChange("restore_window_days") { - dataProtectionSettings.RestoreWindowDays = conversion.Pointer(cast.ToInt(d.Get("restore_window_days"))) - } + err := updateOrCreateDataProtectionSetting(ctx, d, connV2, projectID) - var backupPoliciesItem []admin.BackupComplianceScheduledPolicyItem - if v, ok := d.GetOk("policy_item_hourly"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Hourly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - if v, ok := d.GetOk("policy_item_daily"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Daily, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: 
itemObj["retention_value"].(int), - }) - } - if v, ok := d.GetOk("policy_item_weekly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Weekly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - } - if v, ok := d.GetOk("policy_item_monthly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Monthly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - } - if v, ok := d.GetOk("policy_item_yearly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ - FrequencyType: cloudbackupschedule.Yearly, - RetentionUnit: itemObj["retention_unit"].(string), - FrequencyInterval: itemObj["frequency_interval"].(int), - RetentionValue: itemObj["retention_value"].(int), - }) - } - } - if len(backupPoliciesItem) > 0 { - dataProtectionSettings.ScheduledPolicyItems = &backupPoliciesItem - } - - params := admin.UpdateDataProtectionSettingsApiParams{ - GroupId: projectID, - DataProtectionSettings20231001: dataProtectionSettings, - OverwriteBackupPolicies: conversion.Pointer(false), - } - _, _, err := connV2.CloudBackupsApi.UpdateDataProtectionSettingsWithParams(ctx, ¶ms).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorBackupPolicyUpdate, projectID, err)) } @@ -622,3 +456,86 @@ func flattenBackupPolicyItems(items []admin.BackupComplianceScheduledPolicyItem, } return policyItems } + 
+func updateOrCreateDataProtectionSetting(ctx context.Context, d *schema.ResourceData, connV2 *admin.APIClient, projectID string) error { + dataProtectionSettings := &admin.DataProtectionSettings20231001{ + ProjectId: conversion.StringPtr(projectID), + AuthorizedEmail: d.Get("authorized_email").(string), + AuthorizedUserFirstName: d.Get("authorized_user_first_name").(string), + AuthorizedUserLastName: d.Get("authorized_user_last_name").(string), + CopyProtectionEnabled: conversion.Pointer(d.Get("copy_protection_enabled").(bool)), + EncryptionAtRestEnabled: conversion.Pointer(d.Get("encryption_at_rest_enabled").(bool)), + PitEnabled: conversion.Pointer(d.Get("pit_enabled").(bool)), + RestoreWindowDays: conversion.Pointer(cast.ToInt(d.Get("restore_window_days"))), + OnDemandPolicyItem: expandDemandBackupPolicyItem(d), + } + + var backupPoliciesItem []admin.BackupComplianceScheduledPolicyItem + if v, ok := d.GetOk("policy_item_hourly"); ok { + item := v.([]any) + itemObj := item[0].(map[string]any) + backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ + FrequencyType: cloudbackupschedule.Hourly, + RetentionUnit: itemObj["retention_unit"].(string), + FrequencyInterval: itemObj["frequency_interval"].(int), + RetentionValue: itemObj["retention_value"].(int), + }) + } + if v, ok := d.GetOk("policy_item_daily"); ok { + item := v.([]any) + itemObj := item[0].(map[string]any) + backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ + FrequencyType: cloudbackupschedule.Daily, + RetentionUnit: itemObj["retention_unit"].(string), + FrequencyInterval: itemObj["frequency_interval"].(int), + RetentionValue: itemObj["retention_value"].(int), + }) + } + if v, ok := d.GetOk("policy_item_weekly"); ok { + items := v.([]any) + for _, s := range items { + itemObj := s.(map[string]any) + backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ + FrequencyType: 
cloudbackupschedule.Weekly, + RetentionUnit: itemObj["retention_unit"].(string), + FrequencyInterval: itemObj["frequency_interval"].(int), + RetentionValue: itemObj["retention_value"].(int), + }) + } + } + if v, ok := d.GetOk("policy_item_monthly"); ok { + items := v.([]any) + for _, s := range items { + itemObj := s.(map[string]any) + backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ + FrequencyType: cloudbackupschedule.Monthly, + RetentionUnit: itemObj["retention_unit"].(string), + FrequencyInterval: itemObj["frequency_interval"].(int), + RetentionValue: itemObj["retention_value"].(int), + }) + } + } + if v, ok := d.GetOk("policy_item_yearly"); ok { + items := v.([]any) + for _, s := range items { + itemObj := s.(map[string]any) + backupPoliciesItem = append(backupPoliciesItem, admin.BackupComplianceScheduledPolicyItem{ + FrequencyType: cloudbackupschedule.Yearly, + RetentionUnit: itemObj["retention_unit"].(string), + FrequencyInterval: itemObj["frequency_interval"].(int), + RetentionValue: itemObj["retention_value"].(int), + }) + } + } + if len(backupPoliciesItem) > 0 { + dataProtectionSettings.ScheduledPolicyItems = &backupPoliciesItem + } + + params := admin.UpdateDataProtectionSettingsApiParams{ + GroupId: projectID, + DataProtectionSettings20231001: dataProtectionSettings, + OverwriteBackupPolicies: conversion.Pointer(false), + } + _, _, err := connV2.CloudBackupsApi.UpdateDataProtectionSettingsWithParams(ctx, ¶ms).Execute() + return err +} diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go index 310793e882..bc99b374e0 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go @@ -8,6 +8,7 @@ import ( "testing" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" @@ -116,6 +117,41 @@ func TestAccBackupCompliancePolicy_withoutRestoreWindowDays(t *testing.T) { }) } +func TestAccBackupCompliancePolicy_UpdateSetsAllAttributes(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid conflicts with backup compliance policy + projectOwnerID = os.Getenv("MONGODB_ATLAS_PROJECT_OWNER_ID") + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Steps: []resource.TestStep{ + { + Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID), + Check: resource.ComposeAggregateTestCheckFunc( + checkExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "authorized_user_first_name", "First"), + resource.TestCheckResourceAttr(resourceName, "authorized_user_last_name", "Last"), + resource.TestCheckResourceAttr(resourceName, "pit_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "encryption_at_rest_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "copy_protection_enabled", "true"), + ), + }, + { + Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acc.DebugPlan(), + plancheck.ExpectEmptyPlan(), + }, + }, + }, + }, + }) +} + func basicTestCase(tb testing.TB, useYearly bool) *resource.TestCase { tb.Helper() @@ -419,3 +455,48 @@ func basicChecks() []resource.TestCheckFunc { checks = append(checks, 
checkExists(resourceName), checkExists(dataSourceName)) return checks } + +func configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID string) string { + return acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false) + + `resource "mongodbatlas_backup_compliance_policy" "backup_policy_res" { + project_id = mongodbatlas_project.test.id + authorized_email = "test@example.com" + authorized_user_first_name = "First" + authorized_user_last_name = "Last" + copy_protection_enabled = true + pit_enabled = false + encryption_at_rest_enabled = false + + restore_window_days = 7 + + on_demand_policy_item { + frequency_interval = 0 + retention_unit = "days" + retention_value = 3 + } + + policy_item_hourly { + frequency_interval = 6 + retention_unit = "days" + retention_value = 7 + } + + policy_item_daily { + frequency_interval = 0 + retention_unit = "days" + retention_value = 7 + } + + policy_item_weekly { + frequency_interval = 0 + retention_unit = "weeks" + retention_value = 4 + } + + policy_item_monthly { + frequency_interval = 0 + retention_unit = "months" + retention_value = 12 + } + }` +} From 8b072200aab19b530739b91ae3e2c63bf538ab50 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Wed, 7 Aug 2024 07:32:04 +0000 Subject: [PATCH 73/84] chore: Updates CHANGELOG.md for #2480 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74ce0796ea..a9900ad3a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## (Unreleased) +BUG FIXES: + +* resource/mongodbatlas_backup_compliance_policy: Fixes an issue where the update operation modified attributes that were not supposed to be modified" ([#2480](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2480)) + ## 1.17.5 (July 30, 2024) NOTES: From f9fea402f12065ec16d40a98e2f9609ca7daf3e1 Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Wed, 7 Aug 2024 08:27:33 +0000 Subject: [PATCH 74/84] chore: Updates examples 
link in index.md for v1.17.6 release --- docs/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.md b/docs/index.md index 75297df245..06f666ecc1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -219,7 +219,7 @@ We ship binaries but do not prioritize fixes for the following operating system ## Examples from MongoDB and the Community -We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.5/examples) +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.6/examples) in our GitHub repo that will help both beginner and more advanced users. Have a good example you've created and want to share? From df074dee671ca863bbd79a8e5df7dac05257bb5a Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Wed, 7 Aug 2024 08:28:01 +0000 Subject: [PATCH 75/84] chore: Updates CHANGELOG.md header for v1.17.6 release --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9900ad3a2..ef845524bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ ## (Unreleased) +## 1.17.6 (August 07, 2024) + BUG FIXES: * resource/mongodbatlas_backup_compliance_policy: Fixes an issue where the update operation modified attributes that were not supposed to be modified" ([#2480](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2480)) From f2c7835fce770d81b44e089ca6be43d7f4733c52 Mon Sep 17 00:00:00 2001 From: Oriol Date: Thu, 8 Aug 2024 14:12:46 +0200 Subject: [PATCH 76/84] feat: Adds azure support for backup snapshot export bucket (#2486) * feat: add azure support for backup snapshot export bucket * fix: add acceptance test configuration * fix changelog entry number * upgrade azuread to 2.53.1 in example * fix checks * fix checks for mongodbatlas_access_list_api_key * fix docs check * fix docs check for data source * add readme.md in examples * use acc.AddAttrChecks in tests * remove importstateverifyignore 
--------- Co-authored-by: Luiz Viana --- .changelog/2486.txt | 11 ++ .github/workflows/acceptance-tests-runner.yml | 9 + .github/workflows/acceptance-tests.yml | 2 + .../cloud_backup_snapshot_export_bucket.md | 10 +- .../cloud_backup_snapshot_export_buckets.md | 9 +- .../cloud_backup_snapshot_export_bucket.md | 25 ++- .../aws/README.md | 16 ++ .../{ => aws}/aws-roles.tf | 0 .../{ => aws}/main.tf | 0 .../{ => aws}/provider.tf | 0 .../{ => aws}/variables.tf | 0 .../{ => aws}/versions.tf | 0 .../azure/README.md | 21 +++ .../azure/azure.tf | 30 ++++ .../azure/main.tf | 30 ++++ .../azure/provider.tf | 15 ++ .../azure/variables.tf | 40 +++++ .../azure/versions.tf | 17 ++ ...rce_cloud_backup_snapshot_export_bucket.go | 24 +++ ...ce_cloud_backup_snapshot_export_buckets.go | 15 ++ ...rce_cloud_backup_snapshot_export_bucket.go | 35 +++- ...p_snapshot_export_bucket_migration_test.go | 2 +- ...loud_backup_snapshot_export_bucket_test.go | 167 +++++++++++++++--- internal/testutil/acc/pre_check.go | 8 + templates/data-source.md.tmpl | 2 + templates/resources.md.tmpl | 2 + 26 files changed, 449 insertions(+), 41 deletions(-) create mode 100644 .changelog/2486.txt create mode 100644 examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/README.md rename examples/mongodbatlas_cloud_backup_snapshot_export_bucket/{ => aws}/aws-roles.tf (100%) rename examples/mongodbatlas_cloud_backup_snapshot_export_bucket/{ => aws}/main.tf (100%) rename examples/mongodbatlas_cloud_backup_snapshot_export_bucket/{ => aws}/provider.tf (100%) rename examples/mongodbatlas_cloud_backup_snapshot_export_bucket/{ => aws}/variables.tf (100%) rename examples/mongodbatlas_cloud_backup_snapshot_export_bucket/{ => aws}/versions.tf (100%) create mode 100644 examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/README.md create mode 100644 examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/azure.tf create mode 100644 
examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/main.tf create mode 100644 examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/provider.tf create mode 100644 examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/variables.tf create mode 100644 examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/versions.tf diff --git a/.changelog/2486.txt b/.changelog/2486.txt new file mode 100644 index 0000000000..643464db5b --- /dev/null +++ b/.changelog/2486.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +data-source/mongodbatlas_cloud_backup_snapshot_export_bucket: Adds Azure support +``` + +```release-note:enhancement +resource/mongodbatlas_cloud_backup_snapshot_export_bucket: Adds Azure support +``` + +```release-note:enhancement +data-source/mongodbatlas_cloud_backup_snapshot_export_buckets: Adds Azure support +``` diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 7a69c08827..136c693902 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -103,6 +103,10 @@ on: required: true aws_s3_bucket_backup: required: true + azure_service_url_backup: + required: true + azure_blob_storage_container_backup: + required: true mongodb_atlas_ldap_hostname: required: true mongodb_atlas_ldap_username: @@ -364,6 +368,11 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ secrets.aws_secret_access_key }} AWS_ACCESS_KEY_ID: ${{ secrets.aws_access_key_id }} AWS_S3_BUCKET: ${{ secrets.aws_s3_bucket_backup }} + AZURE_BLOB_STORAGE_CONTAINER_NAME: ${{ secrets.azure_blob_storage_container_backup }} + AZURE_SERVICE_URL: ${{ secrets.azure_service_url_backup }} + AZURE_ATLAS_APP_ID: ${{ inputs.azure_atlas_app_id }} + AZURE_SERVICE_PRINCIPAL_ID: ${{ inputs.azure_service_principal_id }} + AZURE_TENANT_ID: ${{ inputs.azure_tenant_id }} ACCTEST_PACKAGES: | ./internal/service/cloudbackupschedule ./internal/service/cloudbackupsnapshot diff --git 
a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index 09a05fd3f8..e02fd08675 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -63,6 +63,8 @@ jobs: aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws_s3_bucket_federation: ${{ secrets.AWS_S3_BUCKET_FEDERATION }} aws_s3_bucket_backup: ${{ secrets.AWS_S3_BUCKET_BACKUP }} + azure_service_url_backup: ${{ secrets.AZURE_SERVICE_URL_BACKUP }} + azure_blob_storage_container_backup: ${{ secrets.AZURE_BLOB_STORAGE_CONTAINER_BACKUP }} mongodb_atlas_ldap_hostname: ${{ secrets.MONGODB_ATLAS_LDAP_HOSTNAME }} mongodb_atlas_ldap_username: ${{ secrets.MONGODB_ATLAS_LDAP_USERNAME }} mongodb_atlas_ldap_password: ${{ secrets.MONGODB_ATLAS_LDAP_PASSWORD }} diff --git a/docs/data-sources/cloud_backup_snapshot_export_bucket.md b/docs/data-sources/cloud_backup_snapshot_export_bucket.md index a715db503b..35dbee7d08 100644 --- a/docs/data-sources/cloud_backup_snapshot_export_bucket.md +++ b/docs/data-sources/cloud_backup_snapshot_export_bucket.md @@ -30,9 +30,13 @@ data "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { In addition to all arguments above, the following attributes are exported: -* `iam_role_id` - Unique identifier of the role that Atlas can use to access the bucket. You must also specify the `bucket_name`. -* `bucket_name` - Name of the bucket that the provided role ID is authorized to access. You must also specify the `iam_role_id`. -* `cloud_provider` - Name of the provider of the cloud service where Atlas can access the S3 bucket. Atlas only supports `AWS`. +* `iam_role_id` - Unique identifier of the role that Atlas can use to access the bucket. +* `bucket_name` - Name of the bucket that the provided role ID is authorized to access. +* `cloud_provider` - Name of the provider of the cloud service where Atlas can access the S3 bucket. 
+* `role_id` - Unique identifier of the Azure Service Principal that Atlas can use to access the Azure Blob Storage Container. +* `service_url` - URL that identifies the blob Endpoint of the Azure Blob Storage Account. +* `tenant_id` - UUID that identifies the Azure Active Directory Tenant ID. + diff --git a/docs/data-sources/cloud_backup_snapshot_export_buckets.md b/docs/data-sources/cloud_backup_snapshot_export_buckets.md index d57e565439..64a49ab8ff 100644 --- a/docs/data-sources/cloud_backup_snapshot_export_buckets.md +++ b/docs/data-sources/cloud_backup_snapshot_export_buckets.md @@ -39,9 +39,12 @@ In addition to all arguments above, the following attributes are exported: ### CloudProviderSnapshotExportBucket * `project_id` - The unique identifier of the project for the Atlas cluster. * `export_bucket_id` - Unique identifier of the snapshot bucket id. -* `iam_role_id` - Unique identifier of the role that Atlas can use to access the bucket. You must also specify the `bucket_name`. -* `bucket_name` - Name of the bucket that the provided role ID is authorized to access. You must also specify the `iam_role_id`. -* `cloud_provider` - Name of the provider of the cloud service where Atlas can access the S3 bucket. Atlas only supports `AWS`. +* `iam_role_id` - Unique identifier of the role that Atlas can use to access the bucket. +* `bucket_name` - Name of the bucket that the provided role ID is authorized to access. +* `cloud_provider` - Name of the provider of the cloud service where Atlas can access the S3 bucket. +* `role_id` - Unique identifier of the Azure Service Principal that Atlas can use to access the Azure Blob Storage Container. +* `service_url` - URL that identifies the blob Endpoint of the Azure Blob Storage Account. +* `tenant_id` - UUID that identifies the Azure Active Directory Tenant ID. 
For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/reference/api/cloud-backup/export/create-one-export-bucket/) diff --git a/docs/resources/cloud_backup_snapshot_export_bucket.md b/docs/resources/cloud_backup_snapshot_export_bucket.md index 2ffef835aa..6a8c6c6a26 100644 --- a/docs/resources/cloud_backup_snapshot_export_bucket.md +++ b/docs/resources/cloud_backup_snapshot_export_bucket.md @@ -7,6 +7,9 @@ ## Example Usage + +### AWS Example + ```terraform resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { project_id = "{PROJECT_ID}" @@ -16,12 +19,28 @@ resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { } ``` +### Azure Example + +```terraform +resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { + project_id = "{PROJECT_ID}" + role_id = "{ROLE_ID}" + service_url = "{SERVICE_URL}" + tenant_id = "{TENANT_ID}" + bucket_name = "example-bucket" + cloud_provider = "AZURE" +} +``` + ## Argument Reference * `project_id` - (Required) The unique identifier of the project for the Atlas cluster. -* `iam_role_id` - (Required) Unique identifier of the role that Atlas can use to access the bucket. You must also specify the `bucket_name`. -* `bucket_name` - (Required) Name of the bucket that the provided role ID is authorized to access. You must also specify the `iam_role_id`. -* `cloud_provider` - (Required) Name of the provider of the cloud service where Atlas can access the S3 bucket. Atlas only supports `AWS`. +* `bucket_name` - (Required) Name of the bucket that the provided role ID is authorized to access. +* `cloud_provider` - (Required) Name of the provider of the cloud service where Atlas can access the S3 bucket. +* `iam_role_id` - Unique identifier of the role that Atlas can use to access the bucket. Required if `cloud_provider` is set to `AWS`. +* `role_id` - Unique identifier of the Azure Service Principal that Atlas can use to access the Azure Blob Storage Container. 
Required if `cloud_provider` is set to `AZURE`. +* `service_url` - URL that identifies the blob Endpoint of the Azure Blob Storage Account. Required if `cloud_provider` is set to `AZURE`. +* `tenant_id` - UUID that identifies the Azure Active Directory Tenant ID. Required if `cloud_provider` is set to `AZURE`. ## Attributes Reference diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/README.md b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/README.md new file mode 100644 index 0000000000..daaed49ee3 --- /dev/null +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/README.md @@ -0,0 +1,16 @@ +# MongoDB Atlas Provider - Atlas Cloud Backup Snapshot Export Bucket in AWS + +This example shows how to set up Cloud Backup Snapshot Export Bucket in Atlas through Terraform. + +You must set the following variables: + +- `public_key`: Atlas public key +- `private_key`: Atlas private key +- `project_id`: Unique 24-hexadecimal digit string that identifies the project where the stream instance will be created. +- `access_key`: AWS Access Key +- `secret_key`: AWS Secret Key. +- `aws_region`: AWS region. + +To learn more, see the [Export Cloud Backup Snapshot Documentation](https://www.mongodb.com/docs/atlas/backup/cloud-backup/export/). 
+ + diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws-roles.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/aws-roles.tf similarity index 100% rename from examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws-roles.tf rename to examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/aws-roles.tf diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/main.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/main.tf similarity index 100% rename from examples/mongodbatlas_cloud_backup_snapshot_export_bucket/main.tf rename to examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/main.tf diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/provider.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/provider.tf similarity index 100% rename from examples/mongodbatlas_cloud_backup_snapshot_export_bucket/provider.tf rename to examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/provider.tf diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/variables.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/variables.tf similarity index 100% rename from examples/mongodbatlas_cloud_backup_snapshot_export_bucket/variables.tf rename to examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/variables.tf diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/versions.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/versions.tf similarity index 100% rename from examples/mongodbatlas_cloud_backup_snapshot_export_bucket/versions.tf rename to examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/versions.tf diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/README.md b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/README.md new file mode 100644 index 0000000000..3885096d8a --- /dev/null +++ 
b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/README.md @@ -0,0 +1,21 @@ +# MongoDB Atlas Provider - Atlas Cloud Backup Snapshot Export Bucket in Azure + +This example shows how to set up Cloud Backup Snapshot Export Bucket in Atlas through Terraform. + +You must set the following variables: + +- `public_key`: Atlas public key. +- `private_key`: Atlas private key. +- `project_id`: Unique 24-hexadecimal digit string that identifies the project where the stream instance will be created. +- `azure_tenant_id`: The Tenant ID which should be used. +- `subscription_id`: Azure Subscription ID. +- `client_id`: Azure Client ID. +- `client_secret`: Azure Client Secret. +- `tenant_id`: Azure Tenant ID. +- `azure_atlas_app_id`: The client ID of the application for which to create a service principal. +- `azure_resource_group_location`: The Azure Region where the Resource Group should exist. +- `storage_account_name`: Specifies the name of the storage account. + +To learn more, see the [Export Cloud Backup Snapshot Documentation](https://www.mongodb.com/docs/atlas/backup/cloud-backup/export/). 
+ + diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/azure.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/azure.tf new file mode 100644 index 0000000000..b6f90e6cba --- /dev/null +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/azure.tf @@ -0,0 +1,30 @@ +resource "azuread_service_principal" "mongo" { + client_id = var.azure_atlas_app_id + app_role_assignment_required = false +} + +# Define the resource group +resource "azurerm_resource_group" "test_resource_group" { + name = "mongo-test-resource-group" + location = var.azure_resource_group_location +} + +resource "azurerm_storage_account" "test_storage_account" { + name = var.storage_account_name + resource_group_name = azurerm_resource_group.test_resource_group.name + location = azurerm_resource_group.test_resource_group.location + account_tier = "Standard" + account_replication_type = "LRS" +} + +resource "azurerm_storage_container" "test_storage_container" { + name = "mongo-test-storage-container" + storage_account_name = azurerm_storage_account.test_storage_account.name + container_access_type = "private" +} + +resource "azurerm_role_assignment" "test_role_assignment" { + principal_id = azuread_service_principal.mongo.id + role_definition_name = "Storage Blob Data Contributor" + scope = azurerm_storage_account.test_storage_account.id +} diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/main.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/main.tf new file mode 100644 index 0000000000..4910b17936 --- /dev/null +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/main.tf @@ -0,0 +1,30 @@ +resource "mongodbatlas_cloud_provider_access_setup" "setup_only" { + project_id = var.project_id + provider_name = "AZURE" + azure_config { + atlas_azure_app_id = var.azure_atlas_app_id + service_principal_id = azuread_service_principal.mongo.id + tenant_id = var.tenant_id + } +} + +resource 
"mongodbatlas_cloud_provider_access_authorization" "auth_role" { + project_id = var.project_id + role_id = mongodbatlas_cloud_provider_access_setup.setup_only.role_id + + azure { + atlas_azure_app_id = var.azure_atlas_app_id + service_principal_id = azuread_service_principal.mongo.id + tenant_id = var.tenant_id + } +} + + +resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { + project_id = var.tenant_id + bucket_name = azurerm_storage_container.test_storage_container.name + cloud_provider = "AZURE" + service_url = azurerm_storage_account.test_storage_account.primary_blob_endpoint + role_id = mongodbatlas_cloud_provider_access_authorization.auth_role.role_id + tenant_id = var.tenant_id +} \ No newline at end of file diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/provider.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/provider.tf new file mode 100644 index 0000000000..d7f7431784 --- /dev/null +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/provider.tf @@ -0,0 +1,15 @@ +provider "mongodbatlas" { + public_key = var.public_key + private_key = var.private_key +} +provider "azuread" { + tenant_id = var.azure_tenant_id +} +provider "azurerm" { + subscription_id = var.subscription_id + client_id = var.client_id + client_secret = var.client_secret + tenant_id = var.tenant_id + features { + } +} \ No newline at end of file diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/variables.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/variables.tf new file mode 100644 index 0000000000..f76cf1143a --- /dev/null +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/variables.tf @@ -0,0 +1,40 @@ +variable "public_key" { + description = "The public API key for MongoDB Atlas" + type = string +} +variable "private_key" { + description = "The private API key for MongoDB Atlas" + type = string +} +variable "project_id" { + description = 
"Atlas project ID" + type = string +} +variable "azure_tenant_id" { + type = string +} +variable "subscription_id" { + default = "Azure Subscription ID" + type = string +} +variable "client_id" { + default = "Azure Client ID" + type = string +} +variable "client_secret" { + default = "Azure Client Secret" + type = string +} +variable "tenant_id" { + default = "Azure Tenant ID" + type = string +} +variable "azure_atlas_app_id" { + type = string +} +variable "azure_resource_group_location" { + type = string +} +variable "storage_account_name" { + type = string +} diff --git a/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/versions.tf b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/versions.tf new file mode 100644 index 0000000000..dec0bfe787 --- /dev/null +++ b/examples/mongodbatlas_cloud_backup_snapshot_export_bucket/azure/versions.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + azuread = { + source = "hashicorp/azuread" + version = "~> 2.53.1" + } + azurerm = { + source = "hashicorp/azurerm" + version = "~> 3.0" + } + mongodbatlas = { + source = "mongodb/mongodbatlas" + version = "~> 1.0" + } + } + required_version = ">= 1.0" +} diff --git a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go index 8312b770aa..17cc0a46e1 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go @@ -40,6 +40,18 @@ func DataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "role_id": { + Type: schema.TypeString, + Computed: true, + }, + "service_url": { + Type: schema.TypeString, + Computed: true, + }, + "tenant_id": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -71,6 +83,18 @@ func 
datasourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf("error setting `iam_role_id` for CloudProviderSnapshotExportBuckets (%s): %s", d.Id(), err)) } + if err = d.Set("role_id", bucket.GetRoleId()); err != nil { + return diag.FromErr(fmt.Errorf("error setting `role_id` for CloudProviderSnapshotExportBuckets (%s): %s", d.Id(), err)) + } + + if err = d.Set("service_url", bucket.GetServiceUrl()); err != nil { + return diag.FromErr(fmt.Errorf("error setting `service_url` for CloudProviderSnapshotExportBuckets (%s): %s", d.Id(), err)) + } + + if err = d.Set("tenant_id", bucket.GetTenantId()); err != nil { + return diag.FromErr(fmt.Errorf("error setting `tenant_id` for CloudProviderSnapshotExportBuckets (%s): %s", d.Id(), err)) + } + d.SetId(bucket.GetId()) return nil diff --git a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go index 8b1b93feef..8f432ca57b 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go +++ b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go @@ -47,6 +47,18 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "role_id": { + Type: schema.TypeString, + Computed: true, + }, + "service_url": { + Type: schema.TypeString, + Computed: true, + }, + "tenant_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -98,6 +110,9 @@ func flattenBuckets(buckets []admin.DiskBackupSnapshotExportBucket) []map[string "bucket_name": bucket.GetBucketName(), "cloud_provider": bucket.GetCloudProvider(), "iam_role_id": bucket.GetIamRoleId(), + "role_id": bucket.GetRoleId(), + "service_url": bucket.GetServiceUrl(), + "tenant_id": bucket.GetTenantId(), } } diff --git 
a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go index 9d704b4bd0..4614840a15 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go @@ -56,7 +56,22 @@ func Schema() map[string]*schema.Schema { }, "iam_role_id": { Type: schema.TypeString, - Required: true, + Optional: true, + ForceNew: true, + }, + "role_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "service_url": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "tenant_id": { + Type: schema.TypeString, + Optional: true, ForceNew: true, }, } @@ -68,13 +83,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. projectID := d.Get("project_id").(string) cloudProvider := d.Get("cloud_provider").(string) - if cloudProvider != "AWS" { - return diag.Errorf("atlas only supports AWS") - } request := &admin.DiskBackupSnapshotExportBucket{ IamRoleId: conversion.StringPtr(d.Get("iam_role_id").(string)), BucketName: conversion.StringPtr(d.Get("bucket_name").(string)), + RoleId: conversion.StringPtr(d.Get("role_id").(string)), + ServiceUrl: conversion.StringPtr(d.Get("service_url").(string)), + TenantId: conversion.StringPtr(d.Get("tenant_id").(string)), CloudProvider: &cloudProvider, } @@ -129,6 +144,18 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.Errorf("error setting `project_id` for snapshot export bucket (%s): %s", d.Id(), err) } + if err := d.Set("service_url", exportBackup.ServiceUrl); err != nil { + return diag.Errorf("error setting `service_url` for snapshot export bucket (%s): %s", d.Id(), err) + } + + if err := d.Set("role_id", exportBackup.RoleId); err != nil { + return 
diag.Errorf("error setting `role_id` for snapshot export bucket (%s): %s", d.Id(), err) + } + + if err := d.Set("tenant_id", exportBackup.TenantId); err != nil { + return diag.Errorf("error setting `tenant_id` for snapshot export bucket (%s): %s", d.Id(), err) + } + return nil } diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_migration_test.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_migration_test.go index dd18e3977e..1e042a8773 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_migration_test.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_migration_test.go @@ -7,5 +7,5 @@ import ( ) func TestMigBackupSnapshotExportBucket_basic(t *testing.T) { - mig.CreateTestAndRunUseExternalProviderNonParallel(t, basicTestCase(t), mig.ExternalProvidersWithAWS(), nil) + mig.CreateTestAndRunUseExternalProviderNonParallel(t, basicAWSTestCase(t), mig.ExternalProvidersWithAWS(), nil) } diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go index e2ba34e1c3..4647c3a975 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go @@ -18,19 +18,44 @@ var ( dataSourcePluralName = "data.mongodbatlas_cloud_backup_snapshot_export_buckets.test" ) -func TestAccBackupSnapshotExportBucket_basic(t *testing.T) { - resource.ParallelTest(t, *basicTestCase(t)) +func TestAccBackupSnapshotExportBucket_basicAWS(t *testing.T) { + resource.ParallelTest(t, *basicAWSTestCase(t)) } -func basicTestCase(tb testing.TB) *resource.TestCase { +func 
TestAccBackupSnapshotExportBucket_basicAzure(t *testing.T) { + resource.ParallelTest(t, *basicAzureTestCase(t)) +} + +func basicAWSTestCase(tb testing.TB) *resource.TestCase { tb.Helper() var ( - projectID = acc.ProjectIDExecution(tb) - bucketName = os.Getenv("AWS_S3_BUCKET") - policyName = acc.RandomName() - roleName = acc.RandomIAMRole() + projectID = acc.ProjectIDExecution(tb) + bucketName = os.Getenv("AWS_S3_BUCKET") + policyName = acc.RandomName() + roleName = acc.RandomIAMRole() + attrMapCheck = map[string]string{ + "project_id": projectID, + "bucket_name": bucketName, + "cloud_provider": "AWS", + } + pluralAttrMapCheck = map[string]string{ + "project_id": projectID, + "results.#": "1", + "results.0.bucket_name": bucketName, + "results.0.cloud_provider": "AWS", + } + attrsSet = []string{ + "iam_role_id", + } ) + checks := []resource.TestCheckFunc{checkExists(resourceName)} + checks = acc.AddAttrChecks(resourceName, checks, attrMapCheck) + checks = acc.AddAttrSetChecks(resourceName, checks, attrsSet...) + checks = acc.AddAttrChecks(dataSourceName, checks, attrMapCheck) + checks = acc.AddAttrSetChecks(dataSourceName, checks, attrsSet...) + checks = acc.AddAttrChecks(dataSourcePluralName, checks, pluralAttrMapCheck) + checks = acc.AddAttrSetChecks(dataSourcePluralName, checks, []string{"results.0.iam_role_id"}...) 
return &resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(tb); acc.PreCheckS3Bucket(tb) }, @@ -39,25 +64,68 @@ func basicTestCase(tb testing.TB) *resource.TestCase { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configBasic(projectID, bucketName, policyName, roleName), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "project_id", projectID), - resource.TestCheckResourceAttr(resourceName, "bucket_name", bucketName), - resource.TestCheckResourceAttr(resourceName, "cloud_provider", "AWS"), - resource.TestCheckResourceAttrSet(resourceName, "iam_role_id"), - - resource.TestCheckResourceAttr(dataSourceName, "project_id", projectID), - resource.TestCheckResourceAttr(dataSourceName, "bucket_name", bucketName), - resource.TestCheckResourceAttr(dataSourceName, "cloud_provider", "AWS"), - resource.TestCheckResourceAttrSet(dataSourceName, "iam_role_id"), - - resource.TestCheckResourceAttr(dataSourcePluralName, "project_id", projectID), - resource.TestCheckResourceAttr(dataSourcePluralName, "results.#", "1"), - resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.bucket_name", bucketName), - resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.cloud_provider", "AWS"), - resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.iam_role_id"), - ), + Config: configAWSBasic(projectID, bucketName, policyName, roleName), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: importStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + } +} + +func basicAzureTestCase(t *testing.T) *resource.TestCase { + t.Helper() + + var ( + projectID = acc.ProjectIDExecution(t) + tenantID = os.Getenv("AZURE_TENANT_ID") + bucketName = os.Getenv("AZURE_BLOB_STORAGE_CONTAINER_NAME") + serviceURL = os.Getenv("AZURE_SERVICE_URL") + atlasAzureAppID = 
os.Getenv("AZURE_ATLAS_APP_ID") + servicePrincipalID = os.Getenv("AZURE_SERVICE_PRINCIPAL_ID") + attrMapCheck = map[string]string{ + "project_id": projectID, + "bucket_name": bucketName, + "service_url": serviceURL, + "tenant_id": tenantID, + "cloud_provider": "AZURE", + } + pluralAttrMapCheck = map[string]string{ + "project_id": projectID, + "results.#": "1", + "results.0.bucket_name": bucketName, + "results.0.service_url": serviceURL, + "results.0.tenant_id": tenantID, + "results.0.cloud_provider": "AZURE", + } + attrsSet = []string{ + "role_id", + } + ) + checks := []resource.TestCheckFunc{checkExists(resourceName)} + checks = acc.AddAttrChecks(resourceName, checks, attrMapCheck) + checks = acc.AddAttrSetChecks(resourceName, checks, attrsSet...) + checks = acc.AddAttrChecks(dataSourceName, checks, attrMapCheck) + checks = acc.AddAttrSetChecks(dataSourceName, checks, attrsSet...) + checks = acc.AddAttrChecks(dataSourcePluralName, checks, pluralAttrMapCheck) + checks = acc.AddAttrSetChecks(dataSourcePluralName, checks, []string{"results.0.role_id"}...) 
+ + return &resource.TestCase{ + PreCheck: func() { + acc.PreCheckBasic(t) + acc.PreCheckCloudProviderAccessAzure(t) + acc.PreCheckAzureExportBucket(t) + }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: checkDestroy, + Steps: []resource.TestStep{ + { + Config: configAzureBasic(projectID, atlasAzureAppID, servicePrincipalID, tenantID, bucketName, serviceURL), + Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { ResourceName: resourceName, @@ -112,7 +180,7 @@ func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { } } -func configBasic(projectID, bucketName, policyName, roleName string) string { +func configAWSBasic(projectID, bucketName, policyName, roleName string) string { return fmt.Sprintf(` resource "aws_iam_role_policy" "test_policy" { name = %[3]q @@ -193,3 +261,48 @@ func configBasic(projectID, bucketName, policyName, roleName string) string { } `, projectID, bucketName, policyName, roleName) } + +func configAzureBasic(projectID, atlasAzureAppID, servicePrincipalID, tenantID, bucketName, serviceURL string) string { + return fmt.Sprintf(` + resource "mongodbatlas_cloud_provider_access_setup" "setup_only" { + project_id = %[1]q + provider_name = "AZURE" + azure_config { + atlas_azure_app_id = %[2]q + service_principal_id = %[3]q + tenant_id = %[4]q + } + } + + resource "mongodbatlas_cloud_provider_access_authorization" "auth_role" { + project_id = %[1]q + role_id = mongodbatlas_cloud_provider_access_setup.setup_only.role_id + + azure { + atlas_azure_app_id = %[2]q + service_principal_id = %[3]q + tenant_id = %[4]q + } + } + + + resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { + project_id = %[1]q + bucket_name = %[5]q + cloud_provider = "AZURE" + service_url = %[6]q + role_id = mongodbatlas_cloud_provider_access_authorization.auth_role.role_id + tenant_id = %[4]q + } + + data "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { + project_id = 
mongodbatlas_cloud_backup_snapshot_export_bucket.test.project_id + export_bucket_id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id + id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id + } + + data "mongodbatlas_cloud_backup_snapshot_export_buckets" "test" { + project_id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.project_id + } + `, projectID, atlasAzureAppID, servicePrincipalID, tenantID, bucketName, serviceURL) +} diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 339f092e05..b0fba2919a 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -296,3 +296,11 @@ func PreCheckS3Bucket(tb testing.TB) { tb.Fatal("`AWS_S3_BUCKET` must be set ") } } + +func PreCheckAzureExportBucket(tb testing.TB) { + tb.Helper() + if os.Getenv("AZURE_SERVICE_URL") == "" || + os.Getenv("AZURE_BLOB_STORAGE_CONTAINER_NAME") == "" { + tb.Fatal("`AZURE_SERVICE_URL` and `AZURE_SERVICE_URL`must be set for Cloud Backup Snapshot Export Bucket acceptance testing") + } +} diff --git a/templates/data-source.md.tmpl b/templates/data-source.md.tmpl index 32b76776d1..45b3c38584 100644 --- a/templates/data-source.md.tmpl +++ b/templates/data-source.md.tmpl @@ -28,6 +28,8 @@ {{ tffile "examples/mongodbatlas_federated_settings_org_role_mapping/main.tf" }} {{ else if eq .Name "mongodbatlas_cloud_backup_snapshot" }} {{ tffile "examples/mongodbatlas_cloud_backup_snapshot_export_job/main.tf" }} + {{ else if eq .Name "mongodbatlas_cloud_backup_snapshot_export_bucket" }} + {{ tffile "examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/main.tf" }} {{ else if eq .Name "mongodbatlas_api_key" }} {{ tffile (printf "examples/%s/create-and-assign-pak/main.tf" .Name )}} {{ else if eq .Name "mongodbatlas_backup_compliance_policy" }} diff --git a/templates/resources.md.tmpl b/templates/resources.md.tmpl index 8b86768a70..ed9ba98760 100644 --- a/templates/resources.md.tmpl 
+++ b/templates/resources.md.tmpl @@ -30,6 +30,8 @@ {{ tffile "examples/mongodbatlas_cloud_backup_snapshot_export_job/main.tf" }} {{ else if eq .Name "mongodbatlas_api_key" }} {{ tffile (printf "examples/%s/create-and-assign-pak/main.tf" .Name )}} + {{ else if eq .Name "mongodbatlas_cloud_backup_snapshot_export_bucket" }} + {{ tffile "examples/mongodbatlas_cloud_backup_snapshot_export_bucket/aws/main.tf" }} {{ else if eq .Name "mongodbatlas_backup_compliance_policy" }} {{ else if eq .Name "mongodbatlas_event_trigger" }} {{ else if eq .Name "mongodbatlas_access_list_api_key" }} From 40a3b9f51b53b04b8692bc7b4eb661e6547188fa Mon Sep 17 00:00:00 2001 From: svc-apix-bot Date: Thu, 8 Aug 2024 12:14:38 +0000 Subject: [PATCH 77/84] chore: Updates CHANGELOG.md for #2486 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ef845524bb..5c30d1f9ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## (Unreleased) +ENHANCEMENTS: + +* data-source/mongodbatlas_cloud_backup_snapshot_export_bucket: Adds Azure support ([#2486](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2486)) +* data-source/mongodbatlas_cloud_backup_snapshot_export_buckets: Adds Azure support ([#2486](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2486)) +* resource/mongodbatlas_cloud_backup_snapshot_export_bucket: Adds Azure support ([#2486](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2486)) + ## 1.17.6 (August 07, 2024) BUG FIXES: From 2059bf63b292874d91f17a458248fc4798cebd04 Mon Sep 17 00:00:00 2001 From: Oriol Date: Fri, 9 Aug 2024 11:11:38 +0200 Subject: [PATCH 78/84] chore: Improves backup_compliance_policy test(#2484) --- .../resource_backup_compliance_policy_test.go | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go 
b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go index bc99b374e0..5b1391c8f5 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go @@ -129,7 +129,7 @@ func TestAccBackupCompliancePolicy_UpdateSetsAllAttributes(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID), + Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID, "7"), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "authorized_user_first_name", "First"), @@ -140,7 +140,18 @@ func TestAccBackupCompliancePolicy_UpdateSetsAllAttributes(t *testing.T) { ), }, { - Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID), + Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID, "8"), + Check: resource.ComposeAggregateTestCheckFunc( + checkExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "authorized_user_first_name", "First"), + resource.TestCheckResourceAttr(resourceName, "authorized_user_last_name", "Last"), + resource.TestCheckResourceAttr(resourceName, "pit_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "encryption_at_rest_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "copy_protection_enabled", "true"), + ), + }, + { + Config: configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID, "8"), ConfigPlanChecks: resource.ConfigPlanChecks{ PreApply: []plancheck.PlanCheck{ acc.DebugPlan(), @@ -456,9 +467,9 @@ func basicChecks() []resource.TestCheckFunc { return checks } -func 
configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID string) string { +func configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, projectOwnerID, restreWindowDays string) string { return acc.ConfigProjectWithSettings(projectName, orgID, projectOwnerID, false) + - `resource "mongodbatlas_backup_compliance_policy" "backup_policy_res" { + fmt.Sprintf(`resource "mongodbatlas_backup_compliance_policy" "backup_policy_res" { project_id = mongodbatlas_project.test.id authorized_email = "test@example.com" authorized_user_first_name = "First" @@ -467,7 +478,7 @@ func configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, p pit_enabled = false encryption_at_rest_enabled = false - restore_window_days = 7 + restore_window_days = %[1]s on_demand_policy_item { frequency_interval = 0 @@ -498,5 +509,5 @@ func configBasicWithOptionalAttributesWithNonDefaultValues(projectName, orgID, p retention_unit = "months" retention_value = 12 } - }` + }`, restreWindowDays) } From 5f715ab17979b2f2be2e0323be5ebf975da707df Mon Sep 17 00:00:00 2001 From: Agustin Bettati Date: Fri, 9 Aug 2024 14:24:51 +0200 Subject: [PATCH 79/84] chore: Updates Atlas Go SDK to version 2024-08-05 (#2487) * automatic changes with renaming * fix trivial compilation errors * include 2024-05-30 version and adjust cloud-backup-schedule to use old SDK * adjust global-cluster-config to use old API * adjust advanced-cluster to use old API * fix hcl config generation remove num_shards attribute --- go.mod | 3 +- go.sum | 6 +- internal/common/conversion/flatten_expand.go | 28 ++++- internal/config/client.go | 38 +++++-- .../data_source_accesslist_api_keys.go | 2 +- .../resource_access_list_api_key.go | 2 +- .../data_source_advanced_cluster.go | 10 +- .../data_source_advanced_clusters.go | 16 +-- .../advancedcluster/model_advanced_cluster.go | 102 +++++++++--------- .../model_advanced_cluster_test.go | 62 +++++------ .../resource_advanced_cluster.go | 98 
++++++++--------- .../resource_advanced_cluster_test.go | 42 ++++---- .../data_source_alert_configuration.go | 2 +- .../data_source_alert_configurations.go | 2 +- .../model_alert_configuration.go | 2 +- .../model_alert_configuration_test.go | 2 +- .../resource_alert_configuration.go | 2 +- .../service/apikey/data_source_api_keys.go | 2 +- internal/service/apikey/resource_api_key.go | 2 +- .../atlasuser/data_source_atlas_user.go | 2 +- .../atlasuser/data_source_atlas_user_test.go | 2 +- .../atlasuser/data_source_atlas_users.go | 2 +- .../atlasuser/data_source_atlas_users_test.go | 2 +- .../service/auditing/resource_auditing.go | 2 +- .../resource_backup_compliance_policy.go | 2 +- .../data_source_cloud_backup_schedule.go | 4 +- .../resource_cloud_backup_schedule.go | 60 +++++------ ...ce_cloud_backup_schedule_migration_test.go | 4 +- .../resource_cloud_backup_schedule_test.go | 36 +++---- .../data_source_cloud_backup_snapshots.go | 2 +- .../model_cloud_backup_snapshot.go | 2 +- .../model_cloud_backup_snapshot_test.go | 2 +- .../resource_cloud_backup_snapshot.go | 2 +- ...ce_cloud_backup_snapshot_export_buckets.go | 2 +- ...rce_cloud_backup_snapshot_export_bucket.go | 6 +- ...ource_cloud_backup_snapshot_export_jobs.go | 2 +- ...source_cloud_backup_snapshot_export_job.go | 2 +- ...urce_cloud_backup_snapshot_restore_jobs.go | 2 +- ...ource_cloud_backup_snapshot_restore_job.go | 2 +- ...rce_cloud_provider_access_authorization.go | 2 +- .../resource_cloud_provider_access_setup.go | 2 +- .../resource_cluster_outage_simulation.go | 2 +- .../service/controlplaneipaddresses/model.go | 2 +- .../controlplaneipaddresses/model_test.go | 2 +- .../data_source_custom_db_roles.go | 2 +- .../customdbrole/resource_custom_db_role.go | 2 +- .../resource_custom_db_role_test.go | 2 +- ...ce_custom_dns_configuration_cluster_aws.go | 2 +- .../databaseuser/model_database_user.go | 2 +- .../databaseuser/model_database_user_test.go | 2 +- .../resource_database_user_migration_test.go | 2 +- 
.../resource_database_user_test.go | 2 +- .../data_source_data_lake_pipeline_run.go | 2 +- .../data_source_data_lake_pipeline_runs.go | 2 +- .../data_source_data_lake_pipelines.go | 2 +- .../resource_data_lake_pipeline.go | 2 +- .../model_encryption_at_rest.go | 2 +- .../model_encryption_at_rest_test.go | 2 +- .../resource_encryption_at_rest.go | 2 +- ...ource_encryption_at_rest_migration_test.go | 2 +- .../resource_encryption_at_rest_test.go | 4 +- ...source_federated_database_instance_test.go | 2 +- ...ata_source_federated_database_instances.go | 2 +- .../resource_federated_database_instance.go | 6 +- .../data_source_federated_query_limits.go | 2 +- .../resource_federated_query_limit.go | 2 +- ...e_federated_settings_identity_providers.go | 2 +- ...el_federated_settings_identity_provider.go | 2 +- ...derated_settings_identity_provider_test.go | 2 +- .../data_source_federated_settings.go | 2 +- ...ource_federated_settings_connected_orgs.go | 2 +- ...model_federated_settings_connected_orgs.go | 2 +- ...ce_federated_settings_org_role_mappings.go | 2 +- ...del_federated_settings_org_role_mapping.go | 2 +- ...rce_federated_settings_org_role_mapping.go | 2 +- .../data_source_global_cluster_config.go | 4 +- .../resource_global_cluster_config.go | 42 ++++---- .../resource_ldap_configuration.go | 2 +- .../ldapverify/resource_ldap_verify.go | 2 +- .../resource_maintenance_window.go | 2 +- .../data_source_network_containers.go | 2 +- .../resource_network_container.go | 2 +- .../data_source_network_peering.go | 2 +- .../data_source_network_peerings.go | 2 +- .../resource_network_peering.go | 2 +- .../onlinearchive/resource_online_archive.go | 2 +- .../organization/data_source_organizations.go | 2 +- .../organization/resource_organization.go | 2 +- .../resource_organization_test.go | 2 +- .../orginvitation/resource_org_invitation.go | 2 +- ...resource_private_endpoint_regional_mode.go | 5 +- .../resource_privatelink_endpoint.go | 2 +- 
...esource_privatelink_endpoint_serverless.go | 2 +- .../resource_privatelink_endpoint_service.go | 8 +- ...service_data_federation_online_archives.go | 2 +- ..._service_data_federation_online_archive.go | 2 +- ...rivatelink_endpoints_service_serverless.go | 2 +- ...privatelink_endpoint_service_serverless.go | 2 +- .../service/project/data_source_project.go | 2 +- .../service/project/data_source_projects.go | 2 +- internal/service/project/model_project.go | 2 +- .../service/project/model_project_test.go | 2 +- internal/service/project/resource_project.go | 4 +- .../resource_project_migration_test.go | 2 +- .../service/project/resource_project_test.go | 14 +-- .../data_source_project_api_keys.go | 2 +- .../projectapikey/resource_project_api_key.go | 2 +- .../resource_project_invitation.go | 2 +- .../model_project_ip_access_list.go | 2 +- .../model_project_ip_access_list_test.go | 2 +- .../resource_project_ip_access_list.go | 2 +- internal/service/pushbasedlogexport/model.go | 2 +- .../service/pushbasedlogexport/model_test.go | 2 +- .../service/pushbasedlogexport/resource.go | 2 +- .../pushbasedlogexport/state_transition.go | 2 +- .../state_transition_test.go | 4 +- .../model_search_deployment.go | 2 +- .../model_search_deployment_test.go | 2 +- .../state_transition_search_deployment.go | 2 +- ...state_transition_search_deployment_test.go | 4 +- .../searchindex/data_source_search_indexes.go | 2 +- .../service/searchindex/model_search_index.go | 2 +- .../searchindex/resource_search_index.go | 2 +- .../data_source_serverless_instances.go | 2 +- .../resource_serverless_instance.go | 2 +- .../resource_serverless_instance_test.go | 2 +- ...a_source_cloud_shared_tier_restore_jobs.go | 2 +- .../data_source_shared_tier_snapshots.go | 2 +- .../data_source_stream_connections.go | 2 +- .../data_source_stream_connections_test.go | 2 +- .../model_stream_connection.go | 2 +- .../model_stream_connection_test.go | 2 +- .../data_source_stream_instances.go | 2 +- 
.../data_source_stream_instances_test.go | 2 +- .../streaminstance/model_stream_instance.go | 2 +- .../model_stream_instance_test.go | 2 +- internal/service/team/data_source_team.go | 2 +- internal/service/team/resource_team.go | 2 +- .../data_source_third_party_integrations.go | 2 +- ...ource_x509_authentication_database_user.go | 2 +- internal/testutil/acc/atlas.go | 16 +-- internal/testutil/acc/cluster.go | 22 ++-- internal/testutil/acc/config_cluster.go | 6 +- internal/testutil/acc/config_cluster_test.go | 27 ++--- internal/testutil/acc/database_user.go | 2 +- internal/testutil/acc/factory.go | 7 +- internal/testutil/acc/project.go | 2 +- internal/testutil/acc/serverless.go | 2 +- 148 files changed, 488 insertions(+), 436 deletions(-) diff --git a/go.mod b/go.mod index 2aed9486b3..291b2d375b 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,8 @@ require ( github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.15.0 go.mongodb.org/atlas v0.36.0 - go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0 + go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 + go.mongodb.org/atlas-sdk/v20240805001 v20240805001.0.0 go.mongodb.org/realm v0.1.0 ) diff --git a/go.sum b/go.sum index 06c8ddeaea..2910145caf 100644 --- a/go.sum +++ b/go.sum @@ -780,8 +780,10 @@ github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgK go.mongodb.org/atlas v0.12.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M= go.mongodb.org/atlas v0.36.0 h1:m05S3AO7zkl+bcG1qaNsEKBnAqnKx2FDwLooHpIG3j4= go.mongodb.org/atlas v0.36.0/go.mod h1:nfPldE9dSama6G2IbIzmEza02Ly7yFZjMMVscaM0uEc= -go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0 h1:D+e3bpRwa9WH3HHs8bLjOdjTp1vdlp83ZYithzGbaQ8= -go.mongodb.org/atlas-sdk/v20240530002 v20240530002.0.0/go.mod h1:seuG5HpfG20/8FhJGyWi4yL7hqAcmq7pf/G0gipNOyM= +go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 h1:d/gbYJ+obR0EM/3DZf7+ZMi2QWISegm3mid7Or708cc= +go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0/go.mod 
h1:O47ZrMMfcWb31wznNIq2PQkkdoFoK0ea2GlmRqGJC2s= +go.mongodb.org/atlas-sdk/v20240805001 v20240805001.0.0 h1:EwA2g7i4JYc0b/oE7zvvOH+POYVrHrWR7BONex3MFTA= +go.mongodb.org/atlas-sdk/v20240805001 v20240805001.0.0/go.mod h1:0aHEphVfsYbpg3CiEUcXeAU7OVoOFig1tltXdLjYiSQ= go.mongodb.org/realm v0.1.0 h1:zJiXyLaZrznQ+Pz947ziSrDKUep39DO4SfA0Fzx8M4M= go.mongodb.org/realm v0.1.0/go.mod h1:4Vj6iy+Puo1TDERcoh4XZ+pjtwbOzPpzqy3Cwe8ZmDM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= diff --git a/internal/common/conversion/flatten_expand.go b/internal/common/conversion/flatten_expand.go index 229934db0e..14148596e3 100644 --- a/internal/common/conversion/flatten_expand.go +++ b/internal/common/conversion/flatten_expand.go @@ -3,7 +3,8 @@ package conversion import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func FlattenLinks(links []admin.Link) []map[string]string { @@ -28,6 +29,17 @@ func FlattenTags(tags []admin.ResourceTag) []map[string]string { return ret } +func FlattenTagsOldSDK(tags []admin20240530.ResourceTag) []map[string]string { + ret := make([]map[string]string, len(tags)) + for i, tag := range tags { + ret[i] = map[string]string{ + "key": tag.GetKey(), + "value": tag.GetValue(), + } + } + return ret +} + func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { list := d.Get("tags").(*schema.Set) ret := make([]admin.ResourceTag, list.Len()) @@ -41,6 +53,20 @@ func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { return &ret } +// this will be removed once ISS dev branch is merged +func ExpandTagsFromSetSchemaOldSDK(d *schema.ResourceData) *[]admin20240530.ResourceTag { + list := d.Get("tags").(*schema.Set) + ret := make([]admin20240530.ResourceTag, list.Len()) + for i, item := range list.List() { + tag := 
item.(map[string]any) + ret[i] = admin20240530.ResourceTag{ + Key: tag["key"].(string), + Value: tag["value"].(string), + } + } + return &ret +} + func ExpandStringList(list []any) (res []string) { for _, v := range list { res = append(res, v.(string)) diff --git a/internal/config/client.go b/internal/config/client.go index f9b5f93e02..8b31a10ccd 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -9,7 +9,8 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" matlasClient "go.mongodb.org/atlas/mongodbatlas" realmAuth "go.mongodb.org/realm/auth" "go.mongodb.org/realm/realm" @@ -28,9 +29,10 @@ const ( // MongoDBClient contains the mongodbatlas clients and configurations type MongoDBClient struct { - Atlas *matlasClient.Client - AtlasV2 *admin.APIClient - Config *Config + Atlas *matlasClient.Client + AtlasV2 *admin.APIClient + AtlasV220240530 *admin20240530.APIClient // used in advanced_cluster and cloud_backup_schedule for avoiding breaking changes + Config *Config } // Config contains the configurations needed to use SDKs @@ -103,10 +105,16 @@ func (c *Config) NewClient(ctx context.Context) (any, error) { return nil, err } + sdkV220240530Client, err := c.newSDKV220240530Client(client) + if err != nil { + return nil, err + } + clients := &MongoDBClient{ - Atlas: atlasClient, - AtlasV2: sdkV2Client, - Config: c, + Atlas: atlasClient, + AtlasV2: sdkV2Client, + AtlasV220240530: sdkV220240530Client, + Config: c, } return clients, nil @@ -128,6 +136,22 @@ func (c *Config) newSDKV2Client(client *http.Client) (*admin.APIClient, error) { return sdkv2, nil } +func (c *Config) newSDKV220240530Client(client *http.Client) (*admin20240530.APIClient, error) { + opts := []admin20240530.ClientModifier{ + admin20240530.UseHTTPClient(client), + admin20240530.UseUserAgent(userAgent(c)), + admin20240530.UseBaseURL(c.BaseURL), + 
admin20240530.UseDebug(false)} + + // Initialize the MongoDB Versioned Atlas Client. + sdkv2, err := admin20240530.NewClient(opts...) + if err != nil { + return nil, err + } + + return sdkv2, nil +} + func (c *MongoDBClient) GetRealmClient(ctx context.Context) (*realm.Client, error) { // Realm if c.Config.PublicKey == "" && c.Config.PrivateKey == "" { diff --git a/internal/service/accesslistapikey/data_source_accesslist_api_keys.go b/internal/service/accesslistapikey/data_source_accesslist_api_keys.go index ccc34007c2..0ce79a22d4 100644 --- a/internal/service/accesslistapikey/data_source_accesslist_api_keys.go +++ b/internal/service/accesslistapikey/data_source_accesslist_api_keys.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/accesslistapikey/resource_access_list_api_key.go b/internal/service/accesslistapikey/resource_access_list_api_key.go index 1eaf6751f5..f099ec0e14 100644 --- a/internal/service/accesslistapikey/resource_access_list_api_key.go +++ b/internal/service/accesslistapikey/resource_access_list_api_key.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/advancedcluster/data_source_advanced_cluster.go b/internal/service/advancedcluster/data_source_advanced_cluster.go index 38b15521a4..32b95da947 100644 --- a/internal/service/advancedcluster/data_source_advanced_cluster.go +++ 
b/internal/service/advancedcluster/data_source_advanced_cluster.go @@ -234,11 +234,11 @@ func DataSource() *schema.Resource { } func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) clusterName := d.Get("name").(string) - cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil @@ -278,7 +278,7 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "labels", clusterName, err)) } - if err := d.Set("tags", conversion.FlattenTags(cluster.GetTags())); err != nil { + if err := d.Set("tags", conversion.FlattenTagsOldSDK(cluster.GetTags())); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "tags", clusterName, err)) } @@ -302,7 +302,7 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "pit_enabled", clusterName, err)) } - replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV2) + replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV220240530) if err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } @@ -328,7 +328,7 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) } - processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { return diag.FromErr(fmt.Errorf(ErrorAdvancedConfRead, clusterName, err)) } diff --git a/internal/service/advancedcluster/data_source_advanced_clusters.go b/internal/service/advancedcluster/data_source_advanced_clusters.go index ed6736d947..e358dd9b5e 100644 --- a/internal/service/advancedcluster/data_source_advanced_clusters.go +++ b/internal/service/advancedcluster/data_source_advanced_clusters.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) func PluralDataSource() *schema.Resource { @@ -245,33 +245,33 @@ func PluralDataSource() *schema.Resource { } func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) d.SetId(id.UniqueId()) - list, resp, err := connV2.ClustersApi.ListClusters(ctx, projectID).Execute() + list, resp, err := connV220240530.ClustersApi.ListClusters(ctx, projectID).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil } return diag.FromErr(fmt.Errorf("error reading advanced cluster list for project(%s): %s", projectID, err)) } - if err := d.Set("results", flattenAdvancedClusters(ctx, connV2, list.GetResults(), d)); err != nil { + 
if err := d.Set("results", flattenAdvancedClusters(ctx, connV220240530, list.GetResults(), d)); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "results", d.Id(), err)) } return nil } -func flattenAdvancedClusters(ctx context.Context, connV2 *admin.APIClient, clusters []admin.AdvancedClusterDescription, d *schema.ResourceData) []map[string]any { +func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530.APIClient, clusters []admin20240530.AdvancedClusterDescription, d *schema.ResourceData) []map[string]any { results := make([]map[string]any, 0, len(clusters)) for i := range clusters { cluster := &clusters[i] - processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() if err != nil { log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", cluster.GetId(), err) } - replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), nil, d, connV2) + replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), nil, d, connV220240530) if err != nil { log.Printf("[WARN] Error setting `replication_specs` for the cluster(%s): %s", cluster.GetId(), err) } @@ -286,7 +286,7 @@ func flattenAdvancedClusters(ctx context.Context, connV2 *admin.APIClient, clust "disk_size_gb": cluster.GetDiskSizeGB(), "encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(), "labels": flattenLabels(cluster.GetLabels()), - "tags": conversion.FlattenTags(cluster.GetTags()), + "tags": conversion.FlattenTagsOldSDK(cluster.GetTags()), "mongo_db_major_version": cluster.GetMongoDBMajorVersion(), "mongo_db_version": cluster.GetMongoDBVersion(), "name": cluster.GetName(), diff --git a/internal/service/advancedcluster/model_advanced_cluster.go 
b/internal/service/advancedcluster/model_advanced_cluster.go index 8f26c1312b..0d5feb4d84 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) var ( @@ -275,7 +275,7 @@ func IsSharedTier(instanceSize string) bool { return instanceSize == "M0" || instanceSize == "M2" || instanceSize == "M5" } -func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin.ClustersApi) retry.StateRefreshFunc { +func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin20240530.ClustersApi) retry.StateRefreshFunc { return func() (any, string, error) { cluster, resp, err := client.GetCluster(ctx, projectID, name).Execute() @@ -300,7 +300,7 @@ func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admi } } -func ResourceClusterListAdvancedRefreshFunc(ctx context.Context, projectID string, clustersAPI admin.ClustersApi) retry.StateRefreshFunc { +func ResourceClusterListAdvancedRefreshFunc(ctx context.Context, projectID string, clustersAPI admin20240530.ClustersApi) retry.StateRefreshFunc { return func() (any, string, error) { clusters, resp, err := clustersAPI.ListClusters(ctx, projectID).Execute() @@ -339,7 +339,7 @@ func FormatMongoDBMajorVersion(val any) string { return fmt.Sprintf("%.1f", cast.ToFloat32(val)) } -func flattenLabels(l []admin.ComponentLabel) []map[string]string { +func flattenLabels(l []admin20240530.ComponentLabel) []map[string]string { labels := make([]map[string]string, 0, len(l)) for _, item := range l { if item.GetKey() == ignoreLabel { @@ -353,7 +353,7 @@ func flattenLabels(l []admin.ComponentLabel) 
[]map[string]string { return labels } -func flattenConnectionStrings(str admin.ClusterConnectionStrings) []map[string]any { +func flattenConnectionStrings(str admin20240530.ClusterConnectionStrings) []map[string]any { return []map[string]any{ { "standard": str.GetStandard(), @@ -365,7 +365,7 @@ func flattenConnectionStrings(str admin.ClusterConnectionStrings) []map[string]a } } -func flattenPrivateEndpoint(privateEndpoints []admin.ClusterDescriptionConnectionStringsPrivateEndpoint) []map[string]any { +func flattenPrivateEndpoint(privateEndpoints []admin20240530.ClusterDescriptionConnectionStringsPrivateEndpoint) []map[string]any { endpoints := make([]map[string]any, 0, len(privateEndpoints)) for _, endpoint := range privateEndpoints { endpoints = append(endpoints, map[string]any{ @@ -379,7 +379,7 @@ func flattenPrivateEndpoint(privateEndpoints []admin.ClusterDescriptionConnectio return endpoints } -func flattenEndpoints(listEndpoints []admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) []map[string]any { +func flattenEndpoints(listEndpoints []admin20240530.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) []map[string]any { endpoints := make([]map[string]any, 0, len(listEndpoints)) for _, endpoint := range listEndpoints { endpoints = append(endpoints, map[string]any{ @@ -391,7 +391,7 @@ func flattenEndpoints(listEndpoints []admin.ClusterDescriptionConnectionStringsP return endpoints } -func flattenBiConnectorConfig(biConnector admin.BiConnector) []map[string]any { +func flattenBiConnectorConfig(biConnector admin20240530.BiConnector) []map[string]any { return []map[string]any{ { "enabled": biConnector.GetEnabled(), @@ -400,11 +400,11 @@ func flattenBiConnectorConfig(biConnector admin.BiConnector) []map[string]any { } } -func expandBiConnectorConfig(d *schema.ResourceData) *admin.BiConnector { +func expandBiConnectorConfig(d *schema.ResourceData) *admin20240530.BiConnector { if v, ok := d.GetOk("bi_connector_config"); ok { if biConn := 
v.([]any); len(biConn) > 0 { biConnMap := biConn[0].(map[string]any) - return &admin.BiConnector{ + return &admin20240530.BiConnector{ Enabled: conversion.Pointer(cast.ToBool(biConnMap["enabled"])), ReadPreference: conversion.StringPtr(cast.ToString(biConnMap["read_preference"])), } @@ -413,7 +413,7 @@ func expandBiConnectorConfig(d *schema.ResourceData) *admin.BiConnector { return nil } -func flattenProcessArgs(p *admin.ClusterDescriptionProcessArgs) []map[string]any { +func flattenProcessArgs(p *admin20240530.ClusterDescriptionProcessArgs) []map[string]any { if p == nil { return nil } @@ -434,8 +434,8 @@ func flattenProcessArgs(p *admin.ClusterDescriptionProcessArgs) []map[string]any } } -func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.ReplicationSpec, tfMapObjects []any, - d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { +func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin20240530.ReplicationSpec, tfMapObjects []any, + d *schema.ResourceData, connV220240530 *admin20240530.APIClient) ([]map[string]any, error) { if len(apiObjects) == 0 { return nil, nil } @@ -455,7 +455,7 @@ func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.Rep continue } - advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiObjects[j], tfMapObject, d, connV2) + advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiObjects[j], tfMapObject, d, connV220240530) if err != nil { return nil, err @@ -479,7 +479,7 @@ func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.Rep } j := slices.IndexFunc(wasAPIObjectUsed, func(isUsed bool) bool { return !isUsed }) - advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiObjects[j], tfMapObject, d, connV2) + advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiObjects[j], tfMapObject, d, connV220240530) if err != nil { return nil, err @@ -492,12 +492,12 @@ func 
FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.Rep return tfList, nil } -func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin.ReplicationSpec) bool { +func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin20240530.ReplicationSpec) bool { return tfObject["id"] == apiObject.GetId() || (tfObject["id"] == nil && tfObject["zone_name"] == apiObject.GetZoneName()) } -func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.ReplicationSpec, tfMapObject map[string]any, - d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { +func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin20240530.ReplicationSpec, tfMapObject map[string]any, + d *schema.ResourceData, connV220240530 *admin20240530.APIClient) (map[string]any, error) { if apiObject == nil { return nil, nil } @@ -506,14 +506,14 @@ func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.Replic tfMap["num_shards"] = apiObject.GetNumShards() tfMap["id"] = apiObject.GetId() if tfMapObject != nil { - object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), tfMapObject["region_configs"].([]any), d, connV2) + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), tfMapObject["region_configs"].([]any), d, connV220240530) if err != nil { return nil, err } tfMap["region_configs"] = object tfMap["container_id"] = containerIDs } else { - object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), nil, d, connV2) + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), nil, d, connV220240530) if err != nil { return nil, err } @@ -525,8 +525,8 @@ func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.Replic return tfMap, nil } -func 
flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []admin.CloudRegionConfig, tfMapObjects []any, - d *schema.ResourceData, connV2 *admin.APIClient) (tfResult []map[string]any, containersIDs map[string]string, err error) { +func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []admin20240530.CloudRegionConfig, tfMapObjects []any, + d *schema.ResourceData, connV220240530 *admin20240530.APIClient) (tfResult []map[string]any, containersIDs map[string]string, err error) { if len(apiObjects) == 0 { return nil, nil, nil } @@ -544,11 +544,11 @@ func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects } if apiObject.GetProviderName() != "TENANT" { - params := &admin.ListPeeringContainerByCloudProviderApiParams{ + params := &admin20240530.ListPeeringContainerByCloudProviderApiParams{ GroupId: d.Get("project_id").(string), ProviderName: apiObject.ProviderName, } - containers, _, err := connV2.NetworkPeeringApi.ListPeeringContainerByCloudProviderWithParams(ctx, params).Execute() + containers, _, err := connV220240530.NetworkPeeringApi.ListPeeringContainerByCloudProviderWithParams(ctx, params).Execute() if err != nil { return nil, nil, err } @@ -561,7 +561,7 @@ func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects return tfList, containerIDs, nil } -func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin.CloudRegionConfig, tfMapObject map[string]any) map[string]any { +func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin20240530.CloudRegionConfig, tfMapObject map[string]any) map[string]any { if apiObject == nil { return nil } @@ -599,11 +599,11 @@ func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin.CloudRegionConf return tfMap } -func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec) *admin.DedicatedHardwareSpec { +func hwSpecToDedicatedHwSpec(apiObject *admin20240530.HardwareSpec) *admin20240530.DedicatedHardwareSpec { if apiObject == 
nil { return nil } - return &admin.DedicatedHardwareSpec{ + return &admin20240530.DedicatedHardwareSpec{ NodeCount: apiObject.NodeCount, DiskIOPS: apiObject.DiskIOPS, EbsVolumeType: apiObject.EbsVolumeType, @@ -611,11 +611,11 @@ func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec) *admin.DedicatedHard } } -func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec) *admin.HardwareSpec { +func dedicatedHwSpecToHwSpec(apiObject *admin20240530.DedicatedHardwareSpec) *admin20240530.HardwareSpec { if apiObject == nil { return nil } - return &admin.HardwareSpec{ + return &admin20240530.HardwareSpec{ NodeCount: apiObject.NodeCount, DiskIOPS: apiObject.DiskIOPS, EbsVolumeType: apiObject.EbsVolumeType, @@ -623,7 +623,7 @@ func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec) *admin.Hard } } -func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHardwareSpec, providerName string, tfMapObjects []any) []map[string]any { +func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin20240530.DedicatedHardwareSpec, providerName string, tfMapObjects []any) []map[string]any { if apiObject == nil { return nil } @@ -661,7 +661,7 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHa return tfList } -func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin.AdvancedAutoScalingSettings) []map[string]any { +func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin20240530.AdvancedAutoScalingSettings) []map[string]any { if apiObject == nil { return nil } @@ -680,7 +680,7 @@ func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin.AdvancedAutoScal return tfList } -func getAdvancedClusterContainerID(containers []admin.CloudProviderContainer, cluster *admin.CloudRegionConfig) string { +func getAdvancedClusterContainerID(containers []admin20240530.CloudProviderContainer, cluster *admin20240530.CloudRegionConfig) string { if len(containers) == 0 { return "" } @@ -697,8 +697,8 @@ func 
getAdvancedClusterContainerID(containers []admin.CloudProviderContainer, cl return "" } -func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin.ClusterDescriptionProcessArgs { - res := admin.ClusterDescriptionProcessArgs{} +func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin20240530.ClusterDescriptionProcessArgs { + res := admin20240530.ClusterDescriptionProcessArgs{} if _, ok := d.GetOkExists("advanced_configuration.0.default_read_concern"); ok { res.DefaultReadConcern = conversion.StringPtr(cast.ToString(p["default_read_concern"])) @@ -758,16 +758,16 @@ func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin.ClusterDe return res } -func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin.ComponentLabel, diag.Diagnostics) { +func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240530.ComponentLabel, diag.Diagnostics) { list := d.Get("labels").(*schema.Set) - res := make([]admin.ComponentLabel, list.Len()) + res := make([]admin20240530.ComponentLabel, list.Len()) for i, val := range list.List() { v := val.(map[string]any) key := v["key"].(string) if key == ignoreLabel { return nil, diag.FromErr(fmt.Errorf("you should not set `Infrastructure Tool` label, it is used for internal purposes")) } - res[i] = admin.ComponentLabel{ + res[i] = admin20240530.ComponentLabel{ Key: conversion.StringPtr(key), Value: conversion.StringPtr(v["value"].(string)), } @@ -775,11 +775,11 @@ func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin.ComponentLab return res, nil } -func expandAdvancedReplicationSpecs(tfList []any) *[]admin.ReplicationSpec { +func expandAdvancedReplicationSpecs(tfList []any) *[]admin20240530.ReplicationSpec { if len(tfList) == 0 { return nil } - var apiObjects []admin.ReplicationSpec + var apiObjects []admin20240530.ReplicationSpec for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -791,8 +791,8 @@ func 
expandAdvancedReplicationSpecs(tfList []any) *[]admin.ReplicationSpec { return &apiObjects } -func expandAdvancedReplicationSpec(tfMap map[string]any) *admin.ReplicationSpec { - apiObject := &admin.ReplicationSpec{ +func expandAdvancedReplicationSpec(tfMap map[string]any) *admin20240530.ReplicationSpec { + apiObject := &admin20240530.ReplicationSpec{ NumShards: conversion.Pointer(tfMap["num_shards"].(int)), ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), RegionConfigs: expandRegionConfigs(tfMap["region_configs"].([]any)), @@ -803,11 +803,11 @@ func expandAdvancedReplicationSpec(tfMap map[string]any) *admin.ReplicationSpec return apiObject } -func expandRegionConfigs(tfList []any) *[]admin.CloudRegionConfig { +func expandRegionConfigs(tfList []any) *[]admin20240530.CloudRegionConfig { if len(tfList) == 0 { return nil } - var apiObjects []admin.CloudRegionConfig + var apiObjects []admin20240530.CloudRegionConfig for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -820,9 +820,9 @@ func expandRegionConfigs(tfList []any) *[]admin.CloudRegionConfig { return &apiObjects } -func expandRegionConfig(tfMap map[string]any) *admin.CloudRegionConfig { +func expandRegionConfig(tfMap map[string]any) *admin20240530.CloudRegionConfig { providerName := tfMap["provider_name"].(string) - apiObject := &admin.CloudRegionConfig{ + apiObject := &admin20240530.CloudRegionConfig{ Priority: conversion.Pointer(cast.ToInt(tfMap["priority"])), ProviderName: conversion.StringPtr(providerName), RegionName: conversion.StringPtr(tfMap["region_name"].(string)), @@ -849,9 +849,9 @@ func expandRegionConfig(tfMap map[string]any) *admin.CloudRegionConfig { return apiObject } -func expandRegionConfigSpec(tfList []any, providerName string) *admin.DedicatedHardwareSpec { +func expandRegionConfigSpec(tfList []any, providerName string) *admin20240530.DedicatedHardwareSpec { tfMap, _ := tfList[0].(map[string]any) - apiObject := 
new(admin.DedicatedHardwareSpec) + apiObject := new(admin20240530.DedicatedHardwareSpec) if providerName == constant.AWS || providerName == constant.AZURE { if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { apiObject.DiskIOPS = conversion.Pointer(v.(int)) @@ -871,11 +871,11 @@ func expandRegionConfigSpec(tfList []any, providerName string) *admin.DedicatedH return apiObject } -func expandRegionConfigAutoScaling(tfList []any) *admin.AdvancedAutoScalingSettings { +func expandRegionConfigAutoScaling(tfList []any) *admin20240530.AdvancedAutoScalingSettings { tfMap, _ := tfList[0].(map[string]any) - settings := admin.AdvancedAutoScalingSettings{ - DiskGB: new(admin.DiskGBAutoScaling), - Compute: new(admin.AdvancedComputeAutoScaling), + settings := admin20240530.AdvancedAutoScalingSettings{ + DiskGB: new(admin20240530.DiskGBAutoScaling), + Compute: new(admin20240530.AdvancedComputeAutoScaling), } if v, ok := tfMap["disk_gb_enabled"]; ok { diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go b/internal/service/advancedcluster/model_advanced_cluster_test.go index 7598e8511b..5e9a5726fb 100644 --- a/internal/service/advancedcluster/model_advanced_cluster_test.go +++ b/internal/service/advancedcluster/model_advanced_cluster_test.go @@ -12,15 +12,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20240530002/admin" - "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + mockadmin20240530 "go.mongodb.org/atlas-sdk/v20240530005/mockadmin" ) var ( dummyClusterName = "clusterName" dummyProjectID = "projectId" errGeneric = errors.New("generic") - advancedClusters = []admin.AdvancedClusterDescription{{StateName: conversion.StringPtr("NOT IDLE")}} + advancedClusters = []admin20240530.AdvancedClusterDescription{{StateName: conversion.StringPtr("NOT IDLE")}} ) func TestFlattenReplicationSpecs(t 
*testing.T) { @@ -31,7 +31,7 @@ func TestFlattenReplicationSpecs(t *testing.T) { unexpectedID = "id2" expectedZoneName = "z1" unexpectedZoneName = "z2" - regionConfigAdmin = []admin.CloudRegionConfig{{ + regionConfigAdmin = []admin20240530.CloudRegionConfig{{ ProviderName: &providerName, RegionName: ®ionName, }} @@ -44,8 +44,8 @@ func TestFlattenReplicationSpecs(t *testing.T) { "region_name": regionName, "zone_name": unexpectedZoneName, } - apiSpecExpected = admin.ReplicationSpec{Id: &expectedID, ZoneName: &expectedZoneName, RegionConfigs: ®ionConfigAdmin} - apiSpecDifferent = admin.ReplicationSpec{Id: &unexpectedID, ZoneName: &unexpectedZoneName, RegionConfigs: ®ionConfigAdmin} + apiSpecExpected = admin20240530.ReplicationSpec{Id: &expectedID, ZoneName: &expectedZoneName, RegionConfigs: ®ionConfigAdmin} + apiSpecDifferent = admin20240530.ReplicationSpec{Id: &unexpectedID, ZoneName: &unexpectedZoneName, RegionConfigs: ®ionConfigAdmin} testSchema = map[string]*schema.Schema{ "project_id": {Type: schema.TypeString}, } @@ -75,60 +75,60 @@ func TestFlattenReplicationSpecs(t *testing.T) { } ) testCases := map[string]struct { - adminSpecs []admin.ReplicationSpec + adminSpecs []admin20240530.ReplicationSpec tfInputSpecs []any expectedLen int }{ "empty admin spec should return empty list": { - []admin.ReplicationSpec{}, + []admin20240530.ReplicationSpec{}, []any{tfSameIDSameZone}, 0, }, "existing id, should match admin": { - []admin.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfSameIDSameZone}, 1, }, "existing different id, should change to admin spec": { - []admin.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfdiffIDDiffZone}, 1, }, "missing id, should be set when zone_name matches": { - []admin.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfNoIDSameZone}, 1, }, "missing id and diff zone, should change to admin spec": { - 
[]admin.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfNoIDDiffZone}, 1, }, "existing id, should match correct api spec using `id` and extra api spec added": { - []admin.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, []any{tfSameIDSameZone}, 2, }, "missing id, should match correct api spec using `zone_name` and extra api spec added": { - []admin.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, []any{tfNoIDSameZone}, 2, }, "two matching specs should be set to api specs": { - []admin.ReplicationSpec{apiSpecExpected, apiSpecDifferent}, + []admin20240530.ReplicationSpec{apiSpecExpected, apiSpecDifferent}, []any{tfSameIDSameZone, tfdiffIDDiffZone}, 2, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - peeringAPI := mockadmin.NetworkPeeringApi{} + peeringAPI := mockadmin20240530.NetworkPeeringApi{} - peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, mock.Anything).Return(admin.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) - containerResult := []admin.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} - peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin.PaginatedCloudProviderContainer{Results: &containerResult}, nil, nil) + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, mock.Anything).Return(admin20240530.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) + containerResult := []admin20240530.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin20240530.PaginatedCloudProviderContainer{Results: 
&containerResult}, nil, nil) - client := &admin.APIClient{ + client := &admin20240530.APIClient{ NetworkPeeringApi: &peeringAPI, } resourceData := schema.TestResourceDataRaw(t, testSchema, map[string]any{"project_id": "p1"}) @@ -153,7 +153,7 @@ type Result struct { func TestUpgradeRefreshFunc(t *testing.T) { testCases := []struct { - mockCluster *admin.AdvancedClusterDescription + mockCluster *admin20240530.AdvancedClusterDescription mockResponse *http.Response expectedResult Result mockError error @@ -215,11 +215,11 @@ func TestUpgradeRefreshFunc(t *testing.T) { }, { name: "Successful", - mockCluster: &admin.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")}, + mockCluster: &admin20240530.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ - response: &admin.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")}, + response: &admin20240530.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")}, state: "stateName", error: nil, }, @@ -228,9 +228,9 @@ func TestUpgradeRefreshFunc(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testObject := mockadmin.NewClustersApi(t) + testObject := mockadmin20240530.NewClustersApi(t) - testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin.GetClusterApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin20240530.GetClusterApiRequest{ApiService: testObject}).Once() testObject.EXPECT().GetClusterExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() result, stateName, err := advancedcluster.UpgradeRefreshFunc(context.Background(), dummyClusterName, dummyProjectID, testObject)() @@ -247,7 +247,7 @@ func TestUpgradeRefreshFunc(t *testing.T) { func TestResourceListAdvancedRefreshFunc(t 
*testing.T) { testCases := []struct { - mockCluster *admin.PaginatedAdvancedClusterDescription + mockCluster *admin20240530.PaginatedAdvancedClusterDescription mockResponse *http.Response expectedResult Result mockError error @@ -309,7 +309,7 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { }, { name: "Successful but with at least one cluster not idle", - mockCluster: &admin.PaginatedAdvancedClusterDescription{Results: &advancedClusters}, + mockCluster: &admin20240530.PaginatedAdvancedClusterDescription{Results: &advancedClusters}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ @@ -320,11 +320,11 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { }, { name: "Successful", - mockCluster: &admin.PaginatedAdvancedClusterDescription{}, + mockCluster: &admin20240530.PaginatedAdvancedClusterDescription{}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ - response: &admin.PaginatedAdvancedClusterDescription{}, + response: &admin20240530.PaginatedAdvancedClusterDescription{}, state: "IDLE", error: nil, }, @@ -333,9 +333,9 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testObject := mockadmin.NewClustersApi(t) + testObject := mockadmin20240530.NewClustersApi(t) - testObject.EXPECT().ListClusters(mock.Anything, mock.Anything).Return(admin.ListClustersApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().ListClusters(mock.Anything, mock.Anything).Return(admin20240530.ListClustersApiRequest{ApiService: testObject}).Once() testObject.EXPECT().ListClustersExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() result, stateName, err := advancedcluster.ResourceClusterListAdvancedRefreshFunc(context.Background(), dummyProjectID, testObject)() diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go 
b/internal/service/advancedcluster/resource_advanced_cluster.go index 08165d1412..5257ff1124 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -21,7 +21,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) const ( @@ -366,10 +366,10 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf("accept_data_risks_and_force_replica_set_reconfig can not be set in creation, only in update")) } } - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) - params := &admin.AdvancedClusterDescription{ + params := &admin20240530.AdvancedClusterDescription{ Name: conversion.StringPtr(cast.ToString(d.Get("name"))), ClusterType: conversion.StringPtr(cast.ToString(d.Get("cluster_type"))), ReplicationSpecs: expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any)), @@ -397,7 +397,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if _, ok := d.GetOk("tags"); ok { - params.Tags = conversion.ExpandTagsFromSetSchema(d) + params.Tags = conversion.ExpandTagsFromSetSchemaOldSDK(d) } if v, ok := d.GetOk("mongo_db_major_version"); ok { params.MongoDBMajorVersion = conversion.StringPtr(FormatMongoDBMajorVersion(v.(string))) @@ -425,13 +425,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} } - cluster, _, err := connV2.ClustersApi.CreateCluster(ctx, projectID, params).Execute() + cluster, _, err := connV220240530.ClustersApi.CreateCluster(ctx, projectID, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) } timeout := d.Timeout(schema.TimeoutCreate) - stateConf := CreateStateChangeConfig(ctx, connV2, projectID, d.Get("name").(string), timeout) + stateConf := CreateStateChangeConfig(ctx, connV220240530, projectID, d.Get("name").(string), timeout) _, err = stateConf.WaitForStateContext(ctx) if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) @@ -440,7 +440,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if ac, ok := d.GetOk("advanced_configuration"); ok { if aclist, ok := ac.([]any); ok && len(aclist) > 0 { params := expandProcessArgs(d, aclist[0].(map[string]any)) - _, _, err := connV2.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, cluster.GetName(), ¶ms).Execute() + _, _, err := connV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, cluster.GetName(), ¶ms).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorConfigUpdate, cluster.GetName(), err)) } @@ -448,10 +448,10 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if v := d.Get("paused").(bool); v { - request := &admin.AdvancedClusterDescription{ + request := &admin20240530.AdvancedClusterDescription{ Paused: conversion.Pointer(v), } - _, _, err = updateAdvancedCluster(ctx, connV2, request, projectID, d.Get("name").(string), timeout) + _, _, err = updateAdvancedCluster(ctx, connV220240530, request, projectID, d.Get("name").(string), timeout) if err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, d.Get("name").(string), err)) } @@ -466,11 +466,11 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return resourceRead(ctx, d, meta) } -func CreateStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { +func CreateStateChangeConfig(ctx context.Context, connV220240530 *admin20240530.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { return retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"}, Target: []string{"IDLE"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), Timeout: timeout, MinTimeout: 1 * time.Minute, Delay: 3 * time.Minute, @@ -478,12 +478,12 @@ func CreateStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, proje } func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -528,7 +528,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "labels", clusterName, err)) } - if err := d.Set("tags", conversion.FlattenTags(cluster.GetTags())); err != nil { + if err := d.Set("tags", conversion.FlattenTagsOldSDK(cluster.GetTags())); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "tags", clusterName, err)) } @@ -552,7 +552,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, 
"pit_enabled", clusterName, err)) } - replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV2) + replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV220240530) if err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } @@ -585,7 +585,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) } - processArgs, _, err := connV2.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorConfigRead, clusterName, err)) } @@ -606,18 +606,18 @@ func resourceUpdateOrUpgrade(ctx context.Context, d *schema.ResourceData, meta a } func resourceUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - upgradeRequest := ctx.Value(upgradeRequestCtxKey).(*admin.LegacyAtlasTenantClusterUpgradeRequest) + upgradeRequest := ctx.Value(upgradeRequestCtxKey).(*admin20240530.LegacyAtlasTenantClusterUpgradeRequest) if upgradeRequest == nil { return diag.FromErr(fmt.Errorf("upgrade called without %s in ctx", string(upgradeRequestCtxKey))) } - upgradeResponse, _, err := upgradeCluster(ctx, connV2, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate)) + upgradeResponse, _, err := upgradeCluster(ctx, connV220240530, upgradeRequest, projectID, clusterName, 
d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) @@ -633,13 +633,13 @@ func resourceUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag } func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - cluster := new(admin.AdvancedClusterDescription) - clusterChangeDetect := new(admin.AdvancedClusterDescription) + cluster := new(admin20240530.AdvancedClusterDescription) + clusterChangeDetect := new(admin20240530.AdvancedClusterDescription) if d.HasChange("backup_enabled") { cluster.BackupEnabled = conversion.Pointer(d.Get("backup_enabled").(bool)) @@ -670,7 +670,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if d.HasChange("tags") { - cluster.Tags = conversion.ExpandTagsFromSetSchema(d) + cluster.Tags = conversion.ExpandTagsFromSetSchemaOldSDK(d) } if d.HasChange("mongo_db_major_version") { @@ -721,8 +721,8 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
ac := d.Get("advanced_configuration") if aclist, ok := ac.([]any); ok && len(aclist) > 0 { params := expandProcessArgs(d, aclist[0].(map[string]any)) - if !reflect.DeepEqual(params, admin.ClusterDescriptionProcessArgs{}) { - _, _, err := connV2.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, ¶ms).Execute() + if !reflect.DeepEqual(params, admin20240530.ClusterDescriptionProcessArgs{}) { + _, _, err := connV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, ¶ms).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorConfigUpdate, clusterName, err)) } @@ -733,7 +733,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. // Has changes if !reflect.DeepEqual(cluster, clusterChangeDetect) { err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { - _, resp, err := updateAdvancedCluster(ctx, connV2, cluster, projectID, clusterName, timeout) + _, resp, err := updateAdvancedCluster(ctx, connV220240530, cluster, projectID, clusterName, timeout) if err != nil { if resp == nil || resp.StatusCode == 400 { return retry.NonRetryableError(fmt.Errorf(errorUpdate, clusterName, err)) @@ -748,10 +748,10 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if d.Get("paused").(bool) { - clusterRequest := &admin.AdvancedClusterDescription{ + clusterRequest := &admin20240530.AdvancedClusterDescription{ Paused: conversion.Pointer(true), } - _, _, err := updateAdvancedCluster(ctx, connV2, clusterRequest, projectID, clusterName, timeout) + _, _, err := updateAdvancedCluster(ctx, connV220240530, clusterRequest, projectID, clusterName, timeout) if err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } @@ -761,12 +761,12 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - params := &admin.DeleteClusterApiParams{ + params := &admin20240530.DeleteClusterApiParams{ GroupId: projectID, ClusterName: clusterName, } @@ -774,14 +774,14 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. params.RetainBackups = conversion.Pointer(v.(bool)) } - _, err := connV2.ClustersApi.DeleteClusterWithParams(ctx, params).Execute() + _, err := connV220240530.ClustersApi.DeleteClusterWithParams(ctx, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorDelete, clusterName, err)) } log.Println("[INFO] Waiting for MongoDB ClusterAdvanced to be destroyed") - stateConf := DeleteStateChangeConfig(ctx, connV2, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) + stateConf := DeleteStateChangeConfig(ctx, connV220240530, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) // Wait, catching any errors _, err = stateConf.WaitForStateContext(ctx) if err != nil { @@ -791,11 +791,11 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return nil } -func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { +func DeleteStateChangeConfig(ctx context.Context, connV220240530 *admin20240530.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { return retry.StateChangeConf{ Pending: []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"}, Target: []string{"DELETED"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, @@ -803,14 +803,14 @@ func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, proje } func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID, name, err := splitSClusterAdvancedImportID(d.Id()) if err != nil { return nil, err } - cluster, _, err := connV2.ClustersApi.GetCluster(ctx, *projectID, *name).Execute() + cluster, _, err := connV220240530.ClustersApi.GetCluster(ctx, *projectID, *name).Execute() if err != nil { return nil, fmt.Errorf("couldn't import cluster %s in project %s, error: %s", *name, *projectID, err) } @@ -832,10 +832,10 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func upgradeCluster(ctx context.Context, connV2 *admin.APIClient, request *admin.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin.LegacyAtlasCluster, *http.Response, error) { +func upgradeCluster(ctx context.Context, connV220240530 *admin20240530.APIClient, request *admin20240530.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin20240530.LegacyAtlasCluster, *http.Response, 
error) { request.Name = name - cluster, resp, err := connV2.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute() + cluster, resp, err := connV220240530.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute() if err != nil { return nil, nil, err } @@ -843,7 +843,7 @@ func upgradeCluster(ctx context.Context, connV2 *admin.APIClient, request *admin stateConf := &retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, Target: []string{"IDLE"}, - Refresh: UpgradeRefreshFunc(ctx, name, projectID, connV2.ClustersApi), + Refresh: UpgradeRefreshFunc(ctx, name, projectID, connV220240530.ClustersApi), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, @@ -873,9 +873,9 @@ func splitSClusterAdvancedImportID(id string) (projectID, clusterName *string, e return } -func resourceRefreshFunc(ctx context.Context, name, projectID string, connV2 *admin.APIClient) retry.StateRefreshFunc { +func resourceRefreshFunc(ctx context.Context, name, projectID string, connV220240530 *admin20240530.APIClient) retry.StateRefreshFunc { return func() (any, string, error) { - cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, name).Execute() + cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, name).Execute() if err != nil && strings.Contains(err.Error(), "reset by peer") { return nil, "REPEATING", nil } @@ -908,7 +908,7 @@ func replicationSpecsHashSet(v any) int { return schema.HashString(buf.String()) } -func getUpgradeRequest(d *schema.ResourceData) *admin.LegacyAtlasTenantClusterUpgradeRequest { +func getUpgradeRequest(d *schema.ResourceData) *admin20240530.LegacyAtlasTenantClusterUpgradeRequest { if !d.HasChange("replication_specs") { return nil } @@ -929,8 +929,8 @@ func getUpgradeRequest(d *schema.ResourceData) *admin.LegacyAtlasTenantClusterUp return nil } - return &admin.LegacyAtlasTenantClusterUpgradeRequest{ - ProviderSettings: &admin.ClusterProviderSettings{ + return 
&admin20240530.LegacyAtlasTenantClusterUpgradeRequest{ + ProviderSettings: &admin20240530.ClusterProviderSettings{ ProviderName: updatedRegion.GetProviderName(), InstanceSizeName: updatedRegion.ElectableSpecs.InstanceSize, RegionName: updatedRegion.RegionName, @@ -940,12 +940,12 @@ func getUpgradeRequest(d *schema.ResourceData) *admin.LegacyAtlasTenantClusterUp func updateAdvancedCluster( ctx context.Context, - connV2 *admin.APIClient, - request *admin.AdvancedClusterDescription, + connV220240530 *admin20240530.APIClient, + request *admin20240530.AdvancedClusterDescription, projectID, name string, timeout time.Duration, -) (*admin.AdvancedClusterDescription, *http.Response, error) { - cluster, resp, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, name, request).Execute() +) (*admin20240530.AdvancedClusterDescription, *http.Response, error) { + cluster, resp, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, name, request).Execute() if err != nil { return nil, nil, err } @@ -953,7 +953,7 @@ func updateAdvancedCluster( stateConf := &retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, Target: []string{"IDLE"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 8f72ce42de..aa4f3efcda 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + 
admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) const ( @@ -219,7 +219,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - processArgs = &admin.ClusterDescriptionProcessArgs{ + processArgs = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("1"), FailIndexKeyTooLong: conversion.Pointer(false), @@ -231,7 +231,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { SampleSizeBIConnector: conversion.Pointer(110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), } - processArgsUpdated = &admin.ClusterDescriptionProcessArgs{ + processArgsUpdated = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("0"), FailIndexKeyTooLong: conversion.Pointer(false), @@ -267,7 +267,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - processArgs = &admin.ClusterDescriptionProcessArgs{ + processArgs = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("1"), JavascriptEnabled: conversion.Pointer(true), @@ -277,7 +277,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { SampleRefreshIntervalBIConnector: conversion.Pointer(310), SampleSizeBIConnector: conversion.Pointer(110), } - processArgsUpdated = &admin.ClusterDescriptionProcessArgs{ + processArgsUpdated = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("majority"), JavascriptEnabled: conversion.Pointer(true), @@ -312,13 +312,13 @@ func 
TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing. projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - autoScaling = &admin.AdvancedAutoScalingSettings{ - Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, - DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScaling = &admin20240530.AdvancedAutoScalingSettings{ + Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, + DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } - autoScalingUpdated = &admin.AdvancedAutoScalingSettings{ - Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, - DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScalingUpdated = &admin20240530.AdvancedAutoScalingSettings{ + Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, + DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } ) @@ -354,13 +354,13 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - autoScaling = &admin.AdvancedAutoScalingSettings{ - Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, - DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScaling = &admin20240530.AdvancedAutoScalingSettings{ + Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, + DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: 
conversion.Pointer(true)}, } - autoScalingUpdated = &admin.AdvancedAutoScalingSettings{ - Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, - DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScalingUpdated = &admin20240530.AdvancedAutoScalingSettings{ + Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, + DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } ) @@ -881,7 +881,7 @@ func checkSingleProviderPaused(name string, paused bool) resource.TestCheckFunc "paused": strconv.FormatBool(paused)}) } -func configAdvanced(projectID, clusterName string, p *admin.ClusterDescriptionProcessArgs) string { +func configAdvanced(projectID, clusterName string, p *admin20240530.ClusterDescriptionProcessArgs) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q @@ -947,7 +947,7 @@ func checkAdvanced(name, tls string) resource.TestCheckFunc { resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name")) } -func configAdvancedDefaultWrite(projectID, clusterName string, p *admin.ClusterDescriptionProcessArgs) string { +func configAdvancedDefaultWrite(projectID, clusterName string, p *admin20240530.ClusterDescriptionProcessArgs) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q @@ -1013,7 +1013,7 @@ func checkAdvancedDefaultWrite(name, writeConcern, tls string) resource.TestChec resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name")) } -func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string { +func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin20240530.AdvancedAutoScalingSettings) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { 
project_id = %[1]q @@ -1044,7 +1044,7 @@ func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin.A `, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize()) } -func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string { +func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p *admin20240530.AdvancedAutoScalingSettings) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q diff --git a/internal/service/alertconfiguration/data_source_alert_configuration.go b/internal/service/alertconfiguration/data_source_alert_configuration.go index 2aebfb60c1..8909a10c18 100644 --- a/internal/service/alertconfiguration/data_source_alert_configuration.go +++ b/internal/service/alertconfiguration/data_source_alert_configuration.go @@ -14,7 +14,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/zclconf/go-cty/cty" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var _ datasource.DataSource = &alertConfigurationDS{} diff --git a/internal/service/alertconfiguration/data_source_alert_configurations.go b/internal/service/alertconfiguration/data_source_alert_configurations.go index 6b178aae06..f3ab30d2bf 100644 --- a/internal/service/alertconfiguration/data_source_alert_configurations.go +++ b/internal/service/alertconfiguration/data_source_alert_configurations.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const 
alertConfigurationsDataSourceName = "alert_configurations" diff --git a/internal/service/alertconfiguration/model_alert_configuration.go b/internal/service/alertconfiguration/model_alert_configuration.go index ab2b50c2ba..2c7e6571e5 100644 --- a/internal/service/alertconfiguration/model_alert_configuration.go +++ b/internal/service/alertconfiguration/model_alert_configuration.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewNotificationList(list []TfNotificationModel) (*[]admin.AlertsNotificationRootForGroup, error) { diff --git a/internal/service/alertconfiguration/model_alert_configuration_test.go b/internal/service/alertconfiguration/model_alert_configuration_test.go index 7fa162fd7d..ac63c13e83 100644 --- a/internal/service/alertconfiguration/model_alert_configuration_test.go +++ b/internal/service/alertconfiguration/model_alert_configuration_test.go @@ -7,7 +7,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/alertconfiguration" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/alertconfiguration/resource_alert_configuration.go b/internal/service/alertconfiguration/resource_alert_configuration.go index 9840cacb37..24080129b6 100644 --- a/internal/service/alertconfiguration/resource_alert_configuration.go +++ b/internal/service/alertconfiguration/resource_alert_configuration.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - 
"go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/apikey/data_source_api_keys.go b/internal/service/apikey/data_source_api_keys.go index 85ef1db062..19744e8f27 100644 --- a/internal/service/apikey/data_source_api_keys.go +++ b/internal/service/apikey/data_source_api_keys.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/apikey/resource_api_key.go b/internal/service/apikey/resource_api_key.go index 2bbd1449c9..f6731c64d4 100644 --- a/internal/service/apikey/resource_api_key.go +++ b/internal/service/apikey/resource_api_key.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/atlasuser/data_source_atlas_user.go b/internal/service/atlasuser/data_source_atlas_user.go index 5bae40ac96..7a662e55e4 100644 --- a/internal/service/atlasuser/data_source_atlas_user.go +++ b/internal/service/atlasuser/data_source_atlas_user.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git 
a/internal/service/atlasuser/data_source_atlas_user_test.go b/internal/service/atlasuser/data_source_atlas_user_test.go index 42d0a594e4..56ace2bf2d 100644 --- a/internal/service/atlasuser/data_source_atlas_user_test.go +++ b/internal/service/atlasuser/data_source_atlas_user_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAccConfigDSAtlasUser_ByUserID(t *testing.T) { diff --git a/internal/service/atlasuser/data_source_atlas_users.go b/internal/service/atlasuser/data_source_atlas_users.go index 70f6973475..e036e942b3 100644 --- a/internal/service/atlasuser/data_source_atlas_users.go +++ b/internal/service/atlasuser/data_source_atlas_users.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/atlasuser/data_source_atlas_users_test.go b/internal/service/atlasuser/data_source_atlas_users_test.go index 29f926b319..af0baf55c4 100644 --- a/internal/service/atlasuser/data_source_atlas_users_test.go +++ b/internal/service/atlasuser/data_source_atlas_users_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/atlasuser" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAccConfigDSAtlasUsers_ByOrgID(t *testing.T) { diff 
--git a/internal/service/auditing/resource_auditing.go b/internal/service/auditing/resource_auditing.go index bd4024eee5..91cffaf374 100644 --- a/internal/service/auditing/resource_auditing.go +++ b/internal/service/auditing/resource_auditing.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go index 8e5a4d2986..39448e2024 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go @@ -8,7 +8,7 @@ import ( "net/http" "strings" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go index ceb01202cc..76b726cc40 100644 --- a/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go @@ -246,12 +246,12 @@ func DataSource() *schema.Resource { } func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) - backupPolicy, _, err := 
connV2.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + backupPolicy, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { return diag.Errorf(cluster.ErrorSnapshotBackupPolicyRead, clusterName, err) } diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index 202a3cfb2d..2e9871c0d8 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) const ( @@ -307,7 +307,7 @@ func Resource() *schema.Resource { func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) @@ -315,7 +315,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. // MongoDB Atlas automatically generates a default backup policy for that cluster. // As a result, we need to first delete the default policies to avoid having // the infrastructure differs from the TF configuration file. 
- if _, _, err := connV2.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute(); err != nil { + if _, _, err := connV220240530.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute(); err != nil { diagWarning := diag.Diagnostic{ Severity: diag.Warning, Summary: "Error deleting default backup schedule", @@ -324,7 +324,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. diags = append(diags, diagWarning) } - if err := cloudBackupScheduleCreateOrUpdate(ctx, connV2, d, projectID, clusterName); err != nil { + if err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, d, projectID, clusterName); err != nil { diags = append(diags, diag.Errorf(errorSnapshotBackupScheduleCreate, err)...) return diags } @@ -338,13 +338,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - backupPolicy, resp, err := connV2.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() + backupPolicy, resp, err := connV220240530.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -417,7 +417,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -429,7 +429,7 @@ func resourceUpdate(ctx context.Context, 
d *schema.ResourceData, meta any) diag. } } - err := cloudBackupScheduleCreateOrUpdate(ctx, connV2, d, projectID, clusterName) + err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, d, projectID, clusterName) if err != nil { return diag.Errorf(errorSnapshotBackupScheduleUpdate, err) } @@ -438,12 +438,12 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - _, _, err := connV2.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute() + _, _, err := connV220240530.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute() if err != nil { return diag.Errorf("error deleting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) } @@ -454,7 +454,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 parts := strings.SplitN(d.Id(), "-", 2) if len(parts) != 2 { @@ -464,7 +464,7 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s projectID := parts[0] clusterName := parts[1] - _, _, err := connV2.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + _, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { return nil, fmt.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) } @@ -485,19 +485,19 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APIClient, d *schema.ResourceData, projectID, clusterName string) error { - resp, _, err := connV2.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() +func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admin20240530.APIClient, d *schema.ResourceData, projectID, clusterName string) error { + resp, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { return fmt.Errorf("error getting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) } - req := &admin.DiskBackupSnapshotSchedule{} + req := &admin20240530.DiskBackupSnapshotSchedule{} copySettings := d.Get("copy_settings") if copySettings != nil && (conversion.HasElementsSliceOrMap(copySettings) || d.HasChange("copy_settings")) { req.CopySettings = expandCopySettings(copySettings.([]any)) } - var policiesItem []admin.DiskBackupApiPolicyItem + var policiesItem []admin20240530.DiskBackupApiPolicyItem if v, ok := d.GetOk("policy_item_hourly"); ok { item := v.([]any) @@ -539,7 +539,7 @@ 
func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APICli item := v.([]any) itemObj := item[0].(map[string]any) if autoExportEnabled := d.Get("auto_export_enabled"); autoExportEnabled != nil && autoExportEnabled.(bool) { - req.Export = &admin.AutoExportPolicy{ + req.Export = &admin20240530.AutoExportPolicy{ ExportBucketId: conversion.StringPtr(itemObj["export_bucket_id"].(string)), FrequencyType: conversion.StringPtr(itemObj["frequency_type"].(string)), } @@ -551,13 +551,13 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APICli } if len(policiesItem) > 0 { - policy := admin.AdvancedDiskBackupSnapshotSchedulePolicy{ + policy := admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{ PolicyItems: &policiesItem, } if len(resp.GetPolicies()) == 1 { policy.Id = resp.GetPolicies()[0].Id } - req.Policies = &[]admin.AdvancedDiskBackupSnapshotSchedulePolicy{policy} + req.Policies = &[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{policy} } if v, ok := d.GetOkExists("reference_hour_of_day"); ok { @@ -575,7 +575,7 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APICli req.UpdateSnapshots = value } - _, _, err = connV2.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, req).Execute() + _, _, err = connV220240530.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, req).Execute() if err != nil { return err } @@ -583,7 +583,7 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APICli return nil } -func flattenPolicyItem(items []admin.DiskBackupApiPolicyItem, frequencyType string) []map[string]any { +func flattenPolicyItem(items []admin20240530.DiskBackupApiPolicyItem, frequencyType string) []map[string]any { policyItems := make([]map[string]any, 0) for _, v := range items { if frequencyType == v.GetFrequencyType() { @@ -599,9 +599,9 @@ func flattenPolicyItem(items []admin.DiskBackupApiPolicyItem, 
frequencyType stri return policyItems } -func flattenExport(roles *admin.DiskBackupSnapshotSchedule) []map[string]any { +func flattenExport(roles *admin20240530.DiskBackupSnapshotSchedule) []map[string]any { exportList := make([]map[string]any, 0) - emptyStruct := admin.DiskBackupSnapshotSchedule{} + emptyStruct := admin20240530.DiskBackupSnapshotSchedule{} if emptyStruct.GetExport() != roles.GetExport() { exportList = append(exportList, map[string]any{ "frequency_type": roles.Export.GetFrequencyType(), @@ -611,7 +611,7 @@ func flattenExport(roles *admin.DiskBackupSnapshotSchedule) []map[string]any { return exportList } -func flattenCopySettings(copySettingList []admin.DiskBackupCopySetting) []map[string]any { +func flattenCopySettings(copySettingList []admin20240530.DiskBackupCopySetting) []map[string]any { copySettings := make([]map[string]any, 0) for _, v := range copySettingList { copySettings = append(copySettings, map[string]any{ @@ -625,13 +625,13 @@ func flattenCopySettings(copySettingList []admin.DiskBackupCopySetting) []map[st return copySettings } -func expandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting { +func expandCopySetting(tfMap map[string]any) *admin20240530.DiskBackupCopySetting { if tfMap == nil { return nil } frequencies := conversion.ExpandStringList(tfMap["frequencies"].(*schema.Set).List()) - copySetting := &admin.DiskBackupCopySetting{ + copySetting := &admin20240530.DiskBackupCopySetting{ CloudProvider: conversion.Pointer(tfMap["cloud_provider"].(string)), Frequencies: &frequencies, RegionName: conversion.Pointer(tfMap["region_name"].(string)), @@ -641,8 +641,8 @@ func expandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting { return copySetting } -func expandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting { - copySettings := make([]admin.DiskBackupCopySetting, 0) +func expandCopySettings(tfList []any) *[]admin20240530.DiskBackupCopySetting { + copySettings := 
make([]admin20240530.DiskBackupCopySetting, 0) for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) @@ -655,8 +655,8 @@ func expandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting { return ©Settings } -func expandPolicyItem(itemObj map[string]any, frequencyType string) admin.DiskBackupApiPolicyItem { - return admin.DiskBackupApiPolicyItem{ +func expandPolicyItem(itemObj map[string]any, frequencyType string) admin20240530.DiskBackupApiPolicyItem { + return admin20240530.DiskBackupApiPolicyItem{ Id: policyItemID(itemObj), RetentionUnit: itemObj["retention_unit"].(string), RetentionValue: itemObj["retention_value"].(int), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 435f2e103a..999f60f14c 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -7,14 +7,14 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true}) useYearly = mig.IsProviderVersionAtLeast("1.16.0") // attribute introduced in this version - config = configNewPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + config = configNewPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), diff --git 
a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index b2f26f32fd..7cd423ed22 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) var ( @@ -29,7 +29,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configNoPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + Config: configNoPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), @@ -57,7 +57,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ), }, { - Config: configNewPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + Config: configNewPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -100,7 +100,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ), }, { - Config: configAdvancedPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + Config: configAdvancedPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -192,7 +192,7 @@ func 
TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configDefault(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + Config: configDefault(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), @@ -226,7 +226,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { ), }, { - Config: configOnePolicy(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + Config: configOnePolicy(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -309,7 +309,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, &admin.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -317,7 +317,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc(checksCreate...), }, { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, &admin.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -338,7 +338,7 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: 
configDefault(&clusterInfo, &admin.DiskBackupSnapshotSchedule{ + Config: configDefault(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), @@ -393,7 +393,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configAzure(&clusterInfo, &admin.DiskBackupApiPolicyItem{ + Config: configAzure(&clusterInfo, &admin20240530.DiskBackupApiPolicyItem{ FrequencyInterval: 1, RetentionUnit: "days", RetentionValue: 1, @@ -406,7 +406,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1")), }, { - Config: configAzure(&clusterInfo, &admin.DiskBackupApiPolicyItem{ + Config: configAzure(&clusterInfo, &admin20240530.DiskBackupApiPolicyItem{ FrequencyInterval: 2, RetentionUnit: "days", RetentionValue: 3, @@ -472,7 +472,7 @@ func checkDestroy(s *terraform.State) error { return nil } -func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { +func configNoPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -490,7 +490,7 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { +func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -534,7 +534,7 @@ func configDefault(info 
*acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) s `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { +func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings bool, p *admin20240530.DiskBackupSnapshotSchedule) string { var copySettings string if !emptyCopySettings { copySettings = fmt.Sprintf(` @@ -591,7 +591,7 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp `, terraformStr, projectID, clusterResourceName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings) } -func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { +func configOnePolicy(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -610,7 +610,7 @@ func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule, useYearly bool) string { +func configNewPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule, useYearly bool) string { var strYearly string if useYearly { strYearly = ` @@ -661,7 +661,7 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedul `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) } -func configAzure(info *acc.ClusterInfo, policy *admin.DiskBackupApiPolicyItem) string { +func configAzure(info 
*acc.ClusterInfo, policy *admin20240530.DiskBackupApiPolicyItem) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -681,7 +681,7 @@ func configAzure(info *acc.ClusterInfo, policy *admin.DiskBackupApiPolicyItem) s `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) } -func configAdvancedPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { +func configAdvancedPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s diff --git a/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go b/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go index c5cc844e62..bf5283b9b2 100644 --- a/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go +++ b/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go index 6c3539b16f..2f852f50de 100644 --- a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go +++ b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go @@ -4,7 +4,7 @@ import ( "errors" "regexp" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func 
SplitSnapshotImportID(id string) (*admin.GetReplicaSetBackupApiParams, error) { diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go index 269e98010e..8e2df8d6af 100644 --- a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupsnapshot" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestSplitSnapshotImportID(t *testing.T) { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go index beb904109f..172f1ad22c 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go @@ -14,7 +14,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cluster" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go index 8f432ca57b..7b6b5b19f3 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go +++ b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go index 4614840a15..cfb2fc4f74 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { @@ -86,11 +86,11 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
request := &admin.DiskBackupSnapshotExportBucket{ IamRoleId: conversion.StringPtr(d.Get("iam_role_id").(string)), - BucketName: conversion.StringPtr(d.Get("bucket_name").(string)), + BucketName: d.Get("bucket_name").(string), RoleId: conversion.StringPtr(d.Get("role_id").(string)), ServiceUrl: conversion.StringPtr(d.Get("service_url").(string)), TenantId: conversion.StringPtr(d.Get("tenant_id").(string)), - CloudProvider: &cloudProvider, + CloudProvider: cloudProvider, } bucketResponse, _, err := conn.CloudBackupsApi.CreateExportBucket(ctx, projectID, request).Execute() diff --git a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go index 216fa1a778..a29f13d3a6 100644 --- a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go +++ b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go index 543f13cd7f..8fe4a0d7a3 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go index 78d743e3ab..61a80a6808 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go +++ b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go index 682e36a27f..2bb1ffc6a6 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go @@ -13,7 +13,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git 
a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go index 43c2c06a53..0a0a568687 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) /* diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go index d796f48bdc..dd35fc02ec 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go @@ -6,7 +6,7 @@ import ( "net/http" "regexp" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go index 284d1d04c2..d9271a7baa 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - 
"go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/controlplaneipaddresses/model.go b/internal/service/controlplaneipaddresses/model.go index a99a367c56..e70ec902c0 100644 --- a/internal/service/controlplaneipaddresses/model.go +++ b/internal/service/controlplaneipaddresses/model.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewTFControlPlaneIPAddresses(ctx context.Context, apiResp *admin.ControlPlaneIPAddresses) (*TFControlPlaneIpAddressesModel, diag.Diagnostics) { diff --git a/internal/service/controlplaneipaddresses/model_test.go b/internal/service/controlplaneipaddresses/model_test.go index c550719e7f..7a4e2f48ea 100644 --- a/internal/service/controlplaneipaddresses/model_test.go +++ b/internal/service/controlplaneipaddresses/model_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/controlplaneipaddresses" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) type sdkToTFModelTestCase struct { diff --git a/internal/service/customdbrole/data_source_custom_db_roles.go b/internal/service/customdbrole/data_source_custom_db_roles.go index a46c8f9542..3f7492bbc7 100644 --- a/internal/service/customdbrole/data_source_custom_db_roles.go +++ b/internal/service/customdbrole/data_source_custom_db_roles.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + 
"go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/customdbrole/resource_custom_db_role.go b/internal/service/customdbrole/resource_custom_db_role.go index 1ba4bab266..4043f34be5 100644 --- a/internal/service/customdbrole/resource_custom_db_role.go +++ b/internal/service/customdbrole/resource_custom_db_role.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/customdbrole/resource_custom_db_role_test.go b/internal/service/customdbrole/resource_custom_db_role_test.go index 1dd4663c54..8e9360f71f 100644 --- a/internal/service/customdbrole/resource_custom_db_role_test.go +++ b/internal/service/customdbrole/resource_custom_db_role_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const resourceName = "mongodbatlas_custom_db_role.test" diff --git a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go index 5ce4f48c4e..8fea87b8d2 100644 --- a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go +++ b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" 
- "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/databaseuser/model_database_user.go b/internal/service/databaseuser/model_database_user.go index 113f31f4e5..a27b018149 100644 --- a/internal/service/databaseuser/model_database_user.go +++ b/internal/service/databaseuser/model_database_user.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewMongoDBDatabaseUser(ctx context.Context, statePasswordValue types.String, dbUserModel *TfDatabaseUserModel) (*admin.CloudDatabaseUser, diag.Diagnostics) { diff --git a/internal/service/databaseuser/model_database_user_test.go b/internal/service/databaseuser/model_database_user_test.go index 4ba4f849cb..c829481f22 100644 --- a/internal/service/databaseuser/model_database_user_test.go +++ b/internal/service/databaseuser/model_database_user_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/databaseuser" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var ( diff --git a/internal/service/databaseuser/resource_database_user_migration_test.go b/internal/service/databaseuser/resource_database_user_migration_test.go index 081a6f8212..6d37e4c860 100644 --- a/internal/service/databaseuser/resource_database_user_migration_test.go +++ b/internal/service/databaseuser/resource_database_user_migration_test.go @@ -3,7 +3,7 @@ package databaseuser_test import ( "testing" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" diff --git a/internal/service/databaseuser/resource_database_user_test.go b/internal/service/databaseuser/resource_database_user_test.go index c384b94de3..1e614f5ec4 100644 --- a/internal/service/databaseuser/resource_database_user_test.go +++ b/internal/service/databaseuser/resource_database_user_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/databaseuser" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go index e772c39cf6..25bdf48651 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const errorDataLakePipelineRunRead = "error reading MongoDB Atlas DataLake Run (%s): %s" diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go index ef548c46b9..c11ba3ae90 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go @@ -9,7 +9,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const errorDataLakePipelineRunList = "error reading MongoDB Atlas DataLake Runs (%s): %s" diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go index 41adab2c44..fb4dfffbe9 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const errorDataLakePipelineList = "error creating MongoDB Atlas DataLake Pipelines: %s" diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go index 9d76b99053..dcb97f9268 100644 --- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go +++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/encryptionatrest/model_encryption_at_rest.go b/internal/service/encryptionatrest/model_encryption_at_rest.go index d52e8ada5b..0e40129e11 100644 
--- a/internal/service/encryptionatrest/model_encryption_at_rest.go +++ b/internal/service/encryptionatrest/model_encryption_at_rest.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewTfEncryptionAtRestRSModel(ctx context.Context, projectID string, encryptionResp *admin.EncryptionAtRest) *TfEncryptionAtRestRSModel { diff --git a/internal/service/encryptionatrest/model_encryption_at_rest_test.go b/internal/service/encryptionatrest/model_encryption_at_rest_test.go index e451e85c9c..ea426bc1a8 100644 --- a/internal/service/encryptionatrest/model_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/model_encryption_at_rest_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/encryptionatrest" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var ( diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest.go b/internal/service/encryptionatrest/resource_encryption_at_rest.go index 8ba9b7de6b..010cc03f3a 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest.go @@ -24,7 +24,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/project" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go 
b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go index 279738d987..0c5f638c7a 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestMigEncryptionAtRest_basicAWS(t *testing.T) { diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go index d44e941a0b..0b9980e92c 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go @@ -16,8 +16,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.mongodb.org/atlas-sdk/v20240530002/admin" - "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" ) const ( diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go index 8a8158e399..1f91f587f2 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAccFederatedDatabaseInstanceDS_s3Bucket(t *testing.T) { diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go index 327ec41abe..aa29744694 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance.go index 48b4d2c1ff..647b7629a8 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance.go @@ -7,7 +7,7 @@ import ( "net/http" "strings" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -714,7 +714,9 @@ func newUrls(urlsFromConfig []any) *[]string { func newCloudProviderConfig(d *schema.ResourceData) *admin.DataLakeCloudProviderConfig { if cloudProvider, ok := d.Get("cloud_provider_config").([]any); ok && len(cloudProvider) == 1 { - return admin.NewDataLakeCloudProviderConfig(*newAWSConfig(cloudProvider)) + return &admin.DataLakeCloudProviderConfig{ + 
Aws: newAWSConfig(cloudProvider), + } } return nil diff --git a/internal/service/federatedquerylimit/data_source_federated_query_limits.go b/internal/service/federatedquerylimit/data_source_federated_query_limits.go index 20b8257250..c270ed8c99 100644 --- a/internal/service/federatedquerylimit/data_source_federated_query_limits.go +++ b/internal/service/federatedquerylimit/data_source_federated_query_limits.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedquerylimit/resource_federated_query_limit.go b/internal/service/federatedquerylimit/resource_federated_query_limit.go index 58ceb1f7d5..9e8c744a26 100644 --- a/internal/service/federatedquerylimit/resource_federated_query_limit.go +++ b/internal/service/federatedquerylimit/resource_federated_query_limit.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go index 73645c947a..67eaee4feb 100644 --- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go +++ b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" 
- "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go index dfddcbcec5..a307e73983 100644 --- a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go +++ b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go @@ -4,7 +4,7 @@ import ( "sort" "strings" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" diff --git a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go index a4a8f9b261..a1505b9d89 100644 --- a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go +++ b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/stretchr/testify/assert" diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go index 62d6ce0ba4..e930171af6 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go @@ -8,7 +8,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func DataSourceSettings() *schema.Resource { diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go index d9a948215f..0aca97e00f 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go b/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go index fdc06ffc07..d9a8ab937d 100644 --- a/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go +++ b/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go @@ -4,7 +4,7 @@ import ( "sort" "strings" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) type roleMappingsByGroupName []admin.AuthFederationRoleMapping diff --git a/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go b/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go index f8371255ff..ae8241e996 100644 --- 
a/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go +++ b/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go b/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go index bd411c53fd..5a0208f843 100644 --- a/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go +++ b/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) type mRoleAssignment []admin.RoleAssignment diff --git a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go index f9e9df91bd..fb5512dd1d 100644 --- a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go +++ b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - 
"go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/globalclusterconfig/data_source_global_cluster_config.go b/internal/service/globalclusterconfig/data_source_global_cluster_config.go index 0672c005e3..099c4af659 100644 --- a/internal/service/globalclusterconfig/data_source_global_cluster_config.go +++ b/internal/service/globalclusterconfig/data_source_global_cluster_config.go @@ -62,11 +62,11 @@ func DataSource() *schema.Resource { } func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) - globalCluster, resp, err := connV2.GlobalClustersApi.GetManagedNamespace(ctx, projectID, clusterName).Execute() + globalCluster, resp, err := connV220240530.GlobalClustersApi.GetManagedNamespace(ctx, projectID, clusterName).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config.go b/internal/service/globalclusterconfig/resource_global_cluster_config.go index edcbd33111..ff1286d8e5 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" // fixed to old API due to CLOUDP-263795 ) const ( @@ -101,7 +101,7 @@ func Resource() *schema.Resource { } func resourceCreate(ctx 
context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) @@ -109,7 +109,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. for _, m := range v.(*schema.Set).List() { mn := m.(map[string]any) - addManagedNamespace := &admin.ManagedNamespace{ + addManagedNamespace := &admin20240530.ManagedNamespace{ Collection: conversion.StringPtr(mn["collection"].(string)), Db: conversion.StringPtr(mn["db"].(string)), CustomShardKey: conversion.StringPtr(mn["custom_shard_key"].(string)), @@ -124,10 +124,10 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } err := retry.RetryContext(ctx, 2*time.Minute, func() *retry.RetryError { - _, _, err := connV2.GlobalClustersApi.CreateManagedNamespace(ctx, projectID, clusterName, addManagedNamespace).Execute() + _, _, err := connV220240530.GlobalClustersApi.CreateManagedNamespace(ctx, projectID, clusterName, addManagedNamespace).Execute() if err != nil { - if admin.IsErrorCode(err, "DUPLICATE_MANAGED_NAMESPACE") { - if err := removeManagedNamespaces(ctx, connV2, v.(*schema.Set).List(), projectID, clusterName); err != nil { + if admin20240530.IsErrorCode(err, "DUPLICATE_MANAGED_NAMESPACE") { + if err := removeManagedNamespaces(ctx, connV220240530, v.(*schema.Set).List(), projectID, clusterName); err != nil { return retry.NonRetryableError(fmt.Errorf(errorGlobalClusterCreate, err)) } return retry.RetryableError(err) @@ -143,13 +143,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} if v, ok := d.GetOk("custom_zone_mappings"); ok { - _, _, err := connV2.GlobalClustersApi.CreateCustomZoneMapping(ctx, projectID, clusterName, &admin.CustomZoneMappings{ + _, _, err := connV220240530.GlobalClustersApi.CreateCustomZoneMapping(ctx, projectID, clusterName, &admin20240530.CustomZoneMappings{ CustomZoneMappings: newCustomZoneMappings(v.(*schema.Set).List()), }).Execute() if err != nil { if v2, ok2 := d.GetOk("managed_namespaces"); ok2 { - if err := removeManagedNamespaces(ctx, connV2, v2.(*schema.Set).List(), projectID, clusterName); err != nil { + if err := removeManagedNamespaces(ctx, connV220240530, v2.(*schema.Set).List(), projectID, clusterName); err != nil { return diag.FromErr(fmt.Errorf(errorGlobalClusterCreate, err)) } } @@ -166,12 +166,12 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 // fixed to old API due to CLOUDP-263795 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - globalCluster, resp, err := connV2.GlobalClustersApi.GetManagedNamespace(ctx, projectID, clusterName).Execute() + globalCluster, resp, err := connV220240530.GlobalClustersApi.GetManagedNamespace(ctx, projectID, clusterName).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -199,20 +199,20 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] if v, ok := d.GetOk("managed_namespaces"); ok { - if err := removeManagedNamespaces(ctx, connV2, v.(*schema.Set).List(), projectID, clusterName); err != nil { + if err := removeManagedNamespaces(ctx, connV220240530, v.(*schema.Set).List(), projectID, clusterName); err != nil { return diag.FromErr(fmt.Errorf(errorGlobalClusterDelete, clusterName, err)) } } if v, ok := d.GetOk("custom_zone_mappings"); ok { if v.(*schema.Set).Len() > 0 { - if _, _, err := connV2.GlobalClustersApi.DeleteAllCustomZoneMappings(ctx, projectID, clusterName).Execute(); err != nil { + if _, _, err := connV220240530.GlobalClustersApi.DeleteAllCustomZoneMappings(ctx, projectID, clusterName).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorGlobalClusterDelete, clusterName, err)) } } @@ -221,7 +221,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return nil } -func flattenManagedNamespaces(managedNamespaces []admin.ManagedNamespaces) []map[string]any { +func flattenManagedNamespaces(managedNamespaces []admin20240530.ManagedNamespaces) []map[string]any { var results []map[string]any if len(managedNamespaces) > 0 { @@ -265,17 +265,17 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func removeManagedNamespaces(ctx context.Context, connV2 *admin.APIClient, remove []any, projectID, clusterName string) error { +func removeManagedNamespaces(ctx context.Context, connV220240530 *admin20240530.APIClient, remove []any, projectID, clusterName string) error { for _, m := range remove { mn := m.(map[string]any) - managedNamespace := &admin.DeleteManagedNamespaceApiParams{ + managedNamespace := &admin20240530.DeleteManagedNamespaceApiParams{ Collection: conversion.StringPtr(mn["collection"].(string)), Db: conversion.StringPtr(mn["db"].(string)), ClusterName: clusterName, GroupId: projectID, } - _, _, err := connV2.GlobalClustersApi.DeleteManagedNamespaceWithParams(ctx, managedNamespace).Execute() + _, _, err := connV220240530.GlobalClustersApi.DeleteManagedNamespaceWithParams(ctx, managedNamespace).Execute() if err != nil { return err @@ -284,12 +284,12 @@ func removeManagedNamespaces(ctx context.Context, connV2 *admin.APIClient, remov return nil } -func newCustomZoneMapping(tfMap map[string]any) *admin.ZoneMapping { +func newCustomZoneMapping(tfMap map[string]any) *admin20240530.ZoneMapping { if tfMap == nil { return nil } - apiObject := &admin.ZoneMapping{ + apiObject := &admin20240530.ZoneMapping{ Location: tfMap["location"].(string), Zone: tfMap["zone"].(string), } @@ -297,12 +297,12 @@ func newCustomZoneMapping(tfMap map[string]any) *admin.ZoneMapping { return apiObject } -func newCustomZoneMappings(tfList []any) *[]admin.ZoneMapping { +func newCustomZoneMappings(tfList []any) *[]admin20240530.ZoneMapping { if len(tfList) == 0 { return nil } 
- apiObjects := make([]admin.ZoneMapping, len(tfList)) + apiObjects := make([]admin20240530.ZoneMapping, len(tfList)) if len(tfList) > 0 { for i, tfMapRaw := range tfList { if tfMap, ok := tfMapRaw.(map[string]any); ok { diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration.go b/internal/service/ldapconfiguration/resource_ldap_configuration.go index a64c54b400..9182281009 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/ldapverify/resource_ldap_verify.go b/internal/service/ldapverify/resource_ldap_verify.go index a8ad9cf9a1..e199c63e97 100644 --- a/internal/service/ldapverify/resource_ldap_verify.go +++ b/internal/service/ldapverify/resource_ldap_verify.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/maintenancewindow/resource_maintenance_window.go b/internal/service/maintenancewindow/resource_maintenance_window.go index 85ff7891b6..ca60b6cce1 100644 --- a/internal/service/maintenancewindow/resource_maintenance_window.go +++ b/internal/service/maintenancewindow/resource_maintenance_window.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/networkcontainer/data_source_network_containers.go b/internal/service/networkcontainer/data_source_network_containers.go index 871928b474..ad5218c2cf 100644 --- a/internal/service/networkcontainer/data_source_network_containers.go +++ b/internal/service/networkcontainer/data_source_network_containers.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/networkcontainer/resource_network_container.go b/internal/service/networkcontainer/resource_network_container.go index b185391b36..e404ff7df1 100644 --- a/internal/service/networkcontainer/resource_network_container.go +++ b/internal/service/networkcontainer/resource_network_container.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/networkpeering/data_source_network_peering.go b/internal/service/networkpeering/data_source_network_peering.go index 74ac732407..f596831578 100644 --- a/internal/service/networkpeering/data_source_network_peering.go +++ b/internal/service/networkpeering/data_source_network_peering.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func DataSource() *schema.Resource { diff --git a/internal/service/networkpeering/data_source_network_peerings.go b/internal/service/networkpeering/data_source_network_peerings.go index 3dc1967aa0..5412234217 100644 --- a/internal/service/networkpeering/data_source_network_peerings.go +++ b/internal/service/networkpeering/data_source_network_peerings.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/networkpeering/resource_network_peering.go b/internal/service/networkpeering/resource_network_peering.go index 1058774dd2..23efb04908 100644 --- a/internal/service/networkpeering/resource_network_peering.go +++ b/internal/service/networkpeering/resource_network_peering.go @@ -16,7 +16,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/networkcontainer" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/onlinearchive/resource_online_archive.go b/internal/service/onlinearchive/resource_online_archive.go index 1e93c832d8..d93371f089 100644 --- a/internal/service/onlinearchive/resource_online_archive.go +++ b/internal/service/onlinearchive/resource_online_archive.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/organization/data_source_organizations.go b/internal/service/organization/data_source_organizations.go index 9ba3c1eabe..484dab350a 100644 --- a/internal/service/organization/data_source_organizations.go +++ b/internal/service/organization/data_source_organizations.go @@ -5,7 +5,7 @@ import ( "fmt" "log" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/organization/resource_organization.go b/internal/service/organization/resource_organization.go index feaa210241..dbeaa71c81 100644 --- a/internal/service/organization/resource_organization.go +++ b/internal/service/organization/resource_organization.go @@ -6,7 +6,7 @@ import ( "log" "net/http" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index 20546ff96f..7b65af7ec8 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -7,7 +7,7 @@ import ( "regexp" "testing" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" diff --git a/internal/service/orginvitation/resource_org_invitation.go b/internal/service/orginvitation/resource_org_invitation.go index fb64f43946..bcdc8c7c16 100644 --- 
a/internal/service/orginvitation/resource_org_invitation.go +++ b/internal/service/orginvitation/resource_org_invitation.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go index f3eb5f7c95..afb105fdab 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) type permCtxKey string @@ -89,6 +89,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Id() enabled := d.Get("enabled").(bool) @@ -114,7 +115,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
stateConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, conn.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), Timeout: d.Timeout(timeoutKey.(string)), MinTimeout: 5 * time.Second, Delay: 3 * time.Second, diff --git a/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go b/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go index 168bcbe263..261638f6a8 100644 --- a/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go +++ b/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go index 729332facc..cf58e60062 100644 --- a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go +++ b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go index 
7c0ccbd942..47450f69f5 100644 --- a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go +++ b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( @@ -142,6 +142,7 @@ func Resource() *schema.Resource { func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) privateLinkID := conversion.GetEncodedID(d.Get("private_link_id").(string), "private_link_id") providerName := d.Get("provider_name").(string) @@ -192,7 +193,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
clusterConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV2.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 5 * time.Second, Delay: 5 * time.Minute, @@ -285,6 +286,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -316,7 +318,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. clusterConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV2.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 5 * time.Second, Delay: 5 * time.Minute, diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go index ac86d494a2..e7df9475d4 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go @@ -9,7 +9,7 
@@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/datalakepipeline" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const errorPrivateEndpointServiceDataFederationOnlineArchiveList = "error reading Private Endpoings for projectId %s: %s" diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go index 70a41e734f..0ae4f26ef8 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go b/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go index 58bc37361f..c162674bb5 100644 --- a/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go +++ b/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go index ed335d3a18..4f87ceecfc 100644 --- a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go +++ b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/project/data_source_project.go b/internal/service/project/data_source_project.go index 2cbc63330f..30bd51cdf8 100644 --- a/internal/service/project/data_source_project.go +++ b/internal/service/project/data_source_project.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/datasource" diff --git a/internal/service/project/data_source_projects.go b/internal/service/project/data_source_projects.go index eff493d60c..e2d17eb7a4 100644 --- a/internal/service/project/data_source_projects.go +++ b/internal/service/project/data_source_projects.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - 
"go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const projectsDataSourceName = "projects" diff --git a/internal/service/project/model_project.go b/internal/service/project/model_project.go index 628bbb687e..2a1ffd8e3b 100644 --- a/internal/service/project/model_project.go +++ b/internal/service/project/model_project.go @@ -3,7 +3,7 @@ package project import ( "context" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" diff --git a/internal/service/project/model_project_test.go b/internal/service/project/model_project_test.go index 81dca2d660..ec139f2309 100644 --- a/internal/service/project/model_project_test.go +++ b/internal/service/project/model_project_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" diff --git a/internal/service/project/resource_project.go b/internal/service/project/resource_project.go index 1244f0728d..bc7671807d 100644 --- a/internal/service/project/resource_project.go +++ b/internal/service/project/resource_project.go @@ -9,7 +9,7 @@ import ( "sort" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/path" @@ -130,7 +130,7 @@ var TfLimitObjectType = types.ObjectType{AttrTypes: map[string]attr.Type{ // Resources that need to be cleaned up before a project can be deleted type AtlasProjectDependants struct { - AdvancedClusters *admin.PaginatedAdvancedClusterDescription + AdvancedClusters *admin.PaginatedClusterDescription20240805 } func (r *projectRS) Schema(ctx context.Context, req 
resource.SchemaRequest, resp *resource.SchemaResponse) { diff --git a/internal/service/project/resource_project_migration_test.go b/internal/service/project/resource_project_migration_test.go index ec7ed63e1c..76b5042f63 100644 --- a/internal/service/project/resource_project_migration_test.go +++ b/internal/service/project/resource_project_migration_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" diff --git a/internal/service/project/resource_project_test.go b/internal/service/project/resource_project_test.go index 973c5ec205..ef63f2852b 100644 --- a/internal/service/project/resource_project_test.go +++ b/internal/service/project/resource_project_test.go @@ -11,8 +11,8 @@ import ( "strings" "testing" - "go.mongodb.org/atlas-sdk/v20240530002/admin" - "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -451,7 +451,7 @@ func TestResourceProjectDependentsDeletingRefreshFunc(t *testing.T) { { name: "Error not from the API", mockResponses: AdvancedClusterDescriptionResponse{ - AdvancedClusterDescription: &admin.PaginatedAdvancedClusterDescription{}, + AdvancedClusterDescription: &admin.PaginatedClusterDescription20240805{}, Err: errors.New("Non-API error"), }, expectedError: true, @@ -459,7 +459,7 @@ func TestResourceProjectDependentsDeletingRefreshFunc(t *testing.T) { { name: "Error from the API", mockResponses: AdvancedClusterDescriptionResponse{ - AdvancedClusterDescription: &admin.PaginatedAdvancedClusterDescription{}, + AdvancedClusterDescription: &admin.PaginatedClusterDescription20240805{}, Err: 
&admin.GenericOpenAPIError{}, }, expectedError: true, @@ -467,9 +467,9 @@ func TestResourceProjectDependentsDeletingRefreshFunc(t *testing.T) { { name: "Successful API call", mockResponses: AdvancedClusterDescriptionResponse{ - AdvancedClusterDescription: &admin.PaginatedAdvancedClusterDescription{ + AdvancedClusterDescription: &admin.PaginatedClusterDescription20240805{ TotalCount: conversion.IntPtr(2), - Results: &[]admin.AdvancedClusterDescription{ + Results: &[]admin.ClusterDescription20240805{ {StateName: conversion.StringPtr("IDLE")}, {StateName: conversion.StringPtr("DELETING")}, }, @@ -1259,7 +1259,7 @@ type DeleteProjectLimitResponse struct { Err error } type AdvancedClusterDescriptionResponse struct { - AdvancedClusterDescription *admin.PaginatedAdvancedClusterDescription + AdvancedClusterDescription *admin.PaginatedClusterDescription20240805 HTTPResponse *http.Response Err error } diff --git a/internal/service/projectapikey/data_source_project_api_keys.go b/internal/service/projectapikey/data_source_project_api_keys.go index 117a11c436..55af1f551b 100644 --- a/internal/service/projectapikey/data_source_project_api_keys.go +++ b/internal/service/projectapikey/data_source_project_api_keys.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/projectapikey/resource_project_api_key.go b/internal/service/projectapikey/resource_project_api_key.go index 439f4dfaa4..f4a6b12c1d 100644 --- a/internal/service/projectapikey/resource_project_api_key.go +++ b/internal/service/projectapikey/resource_project_api_key.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/projectinvitation/resource_project_invitation.go b/internal/service/projectinvitation/resource_project_invitation.go index 4172d0081e..8ca1b7c199 100644 --- a/internal/service/projectinvitation/resource_project_invitation.go +++ b/internal/service/projectinvitation/resource_project_invitation.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/projectipaccesslist/model_project_ip_access_list.go b/internal/service/projectipaccesslist/model_project_ip_access_list.go index a33e77c34f..12c7bae998 100644 --- a/internal/service/projectipaccesslist/model_project_ip_access_list.go +++ b/internal/service/projectipaccesslist/model_project_ip_access_list.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewMongoDBProjectIPAccessList(projectIPAccessListModel *TfProjectIPAccessListModel) *[]admin.NetworkPermissionEntry { diff --git a/internal/service/projectipaccesslist/model_project_ip_access_list_test.go b/internal/service/projectipaccesslist/model_project_ip_access_list_test.go index 282939b0a6..e51f4a4787 100644 --- a/internal/service/projectipaccesslist/model_project_ip_access_list_test.go +++ 
b/internal/service/projectipaccesslist/model_project_ip_access_list_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/projectipaccesslist" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var ( diff --git a/internal/service/projectipaccesslist/resource_project_ip_access_list.go b/internal/service/projectipaccesslist/resource_project_ip_access_list.go index 07b91ffdfc..144d587ce1 100644 --- a/internal/service/projectipaccesslist/resource_project_ip_access_list.go +++ b/internal/service/projectipaccesslist/resource_project_ip_access_list.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" diff --git a/internal/service/pushbasedlogexport/model.go b/internal/service/pushbasedlogexport/model.go index c32f52e514..0238196c0b 100644 --- a/internal/service/pushbasedlogexport/model.go +++ b/internal/service/pushbasedlogexport/model.go @@ -3,7 +3,7 @@ package pushbasedlogexport import ( "context" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/diag" diff --git a/internal/service/pushbasedlogexport/model_test.go b/internal/service/pushbasedlogexport/model_test.go index 10e1678d18..c0523a6c00 100644 --- a/internal/service/pushbasedlogexport/model_test.go +++ b/internal/service/pushbasedlogexport/model_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" 
"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/types" diff --git a/internal/service/pushbasedlogexport/resource.go b/internal/service/pushbasedlogexport/resource.go index aad810d34c..dfebae9189 100644 --- a/internal/service/pushbasedlogexport/resource.go +++ b/internal/service/pushbasedlogexport/resource.go @@ -7,7 +7,7 @@ import ( "slices" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" diff --git a/internal/service/pushbasedlogexport/state_transition.go b/internal/service/pushbasedlogexport/state_transition.go index 3286736b13..e8c1283339 100644 --- a/internal/service/pushbasedlogexport/state_transition.go +++ b/internal/service/pushbasedlogexport/state_transition.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/pushbasedlogexport/state_transition_test.go b/internal/service/pushbasedlogexport/state_transition_test.go index 137d774d6e..d49f0757b3 100644 --- a/internal/service/pushbasedlogexport/state_transition_test.go +++ b/internal/service/pushbasedlogexport/state_transition_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20240530002/admin" - "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" diff --git a/internal/service/searchdeployment/model_search_deployment.go b/internal/service/searchdeployment/model_search_deployment.go index c6f80c5f1f..8548aacf19 100644 --- 
a/internal/service/searchdeployment/model_search_deployment.go +++ b/internal/service/searchdeployment/model_search_deployment.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewSearchDeploymentReq(ctx context.Context, searchDeploymentPlan *TFSearchDeploymentRSModel) admin.ApiSearchDeploymentRequest { diff --git a/internal/service/searchdeployment/model_search_deployment_test.go b/internal/service/searchdeployment/model_search_deployment_test.go index 643c3dd458..e82b8a6ff7 100644 --- a/internal/service/searchdeployment/model_search_deployment_test.go +++ b/internal/service/searchdeployment/model_search_deployment_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) type sdkToTFModelTestCase struct { diff --git a/internal/service/searchdeployment/state_transition_search_deployment.go b/internal/service/searchdeployment/state_transition_search_deployment.go index 3ba981c451..98c992be4c 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment.go +++ b/internal/service/searchdeployment/state_transition_search_deployment.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/retrystrategy" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const 
SearchDeploymentDoesNotExistsError = "ATLAS_SEARCH_DEPLOYMENT_DOES_NOT_EXIST" diff --git a/internal/service/searchdeployment/state_transition_search_deployment_test.go b/internal/service/searchdeployment/state_transition_search_deployment_test.go index 21511e0d95..a004a1e4eb 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment_test.go +++ b/internal/service/searchdeployment/state_transition_search_deployment_test.go @@ -12,8 +12,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.mongodb.org/atlas-sdk/v20240530002/admin" - "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" ) var ( diff --git a/internal/service/searchindex/data_source_search_indexes.go b/internal/service/searchindex/data_source_search_indexes.go index 3cfd89f617..d3bd55bc8f 100644 --- a/internal/service/searchindex/data_source_search_indexes.go +++ b/internal/service/searchindex/data_source_search_indexes.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/searchindex/model_search_index.go b/internal/service/searchindex/model_search_index.go index 6b5adfbbb4..40f7fb4d8c 100644 --- a/internal/service/searchindex/model_search_index.go +++ b/internal/service/searchindex/model_search_index.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func flattenSearchIndexSynonyms(synonyms []admin.SearchSynonymMappingDefinition) []map[string]any { diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index 0139101588..559202413b 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/serverlessinstance/data_source_serverless_instances.go b/internal/service/serverlessinstance/data_source_serverless_instances.go index a55498593a..52f089258e 100644 --- a/internal/service/serverlessinstance/data_source_serverless_instances.go +++ b/internal/service/serverlessinstance/data_source_serverless_instances.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/serverlessinstance/resource_serverless_instance.go b/internal/service/serverlessinstance/resource_serverless_instance.go index 2f7a525db2..828a7eaa03 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance.go +++ b/internal/service/serverlessinstance/resource_serverless_instance.go @@ -15,7 +15,7 @@ import ( 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/serverlessinstance/resource_serverless_instance_test.go b/internal/service/serverlessinstance/resource_serverless_instance_test.go index c7602623d9..a527d70629 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance_test.go +++ b/internal/service/serverlessinstance/resource_serverless_instance_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go b/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go index ac5219b683..112ecf1086 100644 --- a/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go +++ b/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/sharedtier/data_source_shared_tier_snapshots.go b/internal/service/sharedtier/data_source_shared_tier_snapshots.go index ff83218e5e..7654136b99 100644 --- a/internal/service/sharedtier/data_source_shared_tier_snapshots.go +++ b/internal/service/sharedtier/data_source_shared_tier_snapshots.go @@ -4,7 +4,7 @@ import ( 
"context" "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/streamconnection/data_source_stream_connections.go b/internal/service/streamconnection/data_source_stream_connections.go index 5b4835dd4b..3800fc1052 100644 --- a/internal/service/streamconnection/data_source_stream_connections.go +++ b/internal/service/streamconnection/data_source_stream_connections.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var _ datasource.DataSource = &streamConnectionsDS{} diff --git a/internal/service/streamconnection/data_source_stream_connections_test.go b/internal/service/streamconnection/data_source_stream_connections_test.go index ca480ae389..47af9736cc 100644 --- a/internal/service/streamconnection/data_source_stream_connections_test.go +++ b/internal/service/streamconnection/data_source_stream_connections_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAccStreamDSStreamConnections_basic(t *testing.T) { diff --git a/internal/service/streamconnection/model_stream_connection.go b/internal/service/streamconnection/model_stream_connection.go index 0c2a0ece7d..142efd7146 100644 --- a/internal/service/streamconnection/model_stream_connection.go +++ b/internal/service/streamconnection/model_stream_connection.go @@ -9,7 +9,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewStreamConnectionReq(ctx context.Context, plan *TFStreamConnectionModel) (*admin.StreamsConnection, diag.Diagnostics) { diff --git a/internal/service/streamconnection/model_stream_connection_test.go b/internal/service/streamconnection/model_stream_connection_test.go index 16ef34747d..c60e122983 100644 --- a/internal/service/streamconnection/model_stream_connection_test.go +++ b/internal/service/streamconnection/model_stream_connection_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamconnection" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/streaminstance/data_source_stream_instances.go b/internal/service/streaminstance/data_source_stream_instances.go index b2cff18b7b..898ffc3ae3 100644 --- a/internal/service/streaminstance/data_source_stream_instances.go +++ b/internal/service/streaminstance/data_source_stream_instances.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var _ datasource.DataSource = &streamInstancesDS{} diff --git a/internal/service/streaminstance/data_source_stream_instances_test.go b/internal/service/streaminstance/data_source_stream_instances_test.go index 9ea31f3118..37b952ad9b 100644 --- 
a/internal/service/streaminstance/data_source_stream_instances_test.go +++ b/internal/service/streaminstance/data_source_stream_instances_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAccStreamDSStreamInstances_basic(t *testing.T) { diff --git a/internal/service/streaminstance/model_stream_instance.go b/internal/service/streaminstance/model_stream_instance.go index a50a3253ec..e11f7f3c06 100644 --- a/internal/service/streaminstance/model_stream_instance.go +++ b/internal/service/streaminstance/model_stream_instance.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func NewStreamInstanceCreateReq(ctx context.Context, plan *TFStreamInstanceModel) (*admin.StreamsTenant, diag.Diagnostics) { diff --git a/internal/service/streaminstance/model_stream_instance_test.go b/internal/service/streaminstance/model_stream_instance_test.go index 126baeb093..94d69cb194 100644 --- a/internal/service/streaminstance/model_stream_instance_test.go +++ b/internal/service/streaminstance/model_stream_instance_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streaminstance" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/team/data_source_team.go b/internal/service/team/data_source_team.go index 99017170f2..6ac8288b76 100644 --- a/internal/service/team/data_source_team.go +++ 
b/internal/service/team/data_source_team.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func DataSource() *schema.Resource { diff --git a/internal/service/team/resource_team.go b/internal/service/team/resource_team.go index a9c423e629..3fd175f037 100644 --- a/internal/service/team/resource_team.go +++ b/internal/service/team/resource_team.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/service/thirdpartyintegration/data_source_third_party_integrations.go b/internal/service/thirdpartyintegration/data_source_third_party_integrations.go index daf79ed180..a4d5fcca9f 100644 --- a/internal/service/thirdpartyintegration/data_source_third_party_integrations.go +++ b/internal/service/thirdpartyintegration/data_source_third_party_integrations.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go index 7b734cb61c..0044516409 100644 --- 
a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go +++ b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( diff --git a/internal/testutil/acc/atlas.go b/internal/testutil/acc/atlas.go index b2bcfe010b..2fa121b10f 100644 --- a/internal/testutil/acc/atlas.go +++ b/internal/testutil/acc/atlas.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func createProject(tb testing.TB, name string) string { @@ -38,7 +38,7 @@ func createCluster(tb testing.TB, projectID, name string) string { _, _, err := ConnV2().ClustersApi.CreateCluster(context.Background(), projectID, &req).Execute() require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err) - stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour) + stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV220240530(), projectID, name, 1*time.Hour) _, err = stateConf.WaitForStateContext(context.Background()) require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err) @@ -50,26 +50,26 @@ func deleteCluster(projectID, name string) { if err != nil { fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err) } - stateConf := advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour) + stateConf 
:= advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV220240530(), projectID, name, 1*time.Hour) _, err = stateConf.WaitForStateContext(context.Background()) if err != nil { fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err) } } -func clusterReq(name, projectID string) admin.AdvancedClusterDescription { - return admin.AdvancedClusterDescription{ +func clusterReq(name, projectID string) admin.ClusterDescription20240805 { + return admin.ClusterDescription20240805{ Name: admin.PtrString(name), GroupId: admin.PtrString(projectID), ClusterType: admin.PtrString("REPLICASET"), - ReplicationSpecs: &[]admin.ReplicationSpec{ + ReplicationSpecs: &[]admin.ReplicationSpec20240805{ { - RegionConfigs: &[]admin.CloudRegionConfig{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { ProviderName: admin.PtrString(constant.AWS), RegionName: admin.PtrString(constant.UsWest2), Priority: admin.PtrInt(7), - ElectableSpecs: &admin.HardwareSpec{ + ElectableSpecs: &admin.HardwareSpec20240805{ InstanceSize: admin.PtrString(constant.M10), NodeCount: admin.PtrInt(3), }, diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index 9298e1da19..615a5430a1 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -7,7 +7,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) // ClusterRequest contains configuration for a cluster where all fields are optional and AddDefaults is used for required fields. 
@@ -136,9 +136,9 @@ func (r *ReplicationSpecRequest) AddDefaults() { } } -func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig { +func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig20240805 { config := cloudRegionConfig(*r) - configs := []admin.CloudRegionConfig{config} + configs := []admin.CloudRegionConfig20240805{config} for i := range r.ExtraRegionConfigs { extra := r.ExtraRegionConfigs[i] configs = append(configs, cloudRegionConfig(extra)) @@ -146,34 +146,32 @@ func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig { return configs } -func replicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { +func replicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec20240805 { if req == nil { req = new(ReplicationSpecRequest) } req.AddDefaults() - defaultNumShards := 1 regionConfigs := req.AllRegionConfigs() - return admin.ReplicationSpec{ - NumShards: &defaultNumShards, + return admin.ReplicationSpec20240805{ ZoneName: &req.ZoneName, RegionConfigs: ®ionConfigs, } } -func cloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { +func cloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig20240805 { req.AddDefaults() - var readOnly admin.DedicatedHardwareSpec + var readOnly admin.DedicatedHardwareSpec20240805 if req.NodeCountReadOnly != 0 { - readOnly = admin.DedicatedHardwareSpec{ + readOnly = admin.DedicatedHardwareSpec20240805{ NodeCount: &req.NodeCountReadOnly, InstanceSize: &req.InstanceSize, } } - return admin.CloudRegionConfig{ + return admin.CloudRegionConfig20240805{ RegionName: &req.Region, Priority: &req.Priority, ProviderName: &req.ProviderName, - ElectableSpecs: &admin.HardwareSpec{ + ElectableSpecs: &admin.HardwareSpec20240805{ InstanceSize: &req.InstanceSize, NodeCount: &req.NodeCount, EbsVolumeType: conversion.StringPtr(req.EbsVolumeType), diff --git a/internal/testutil/acc/config_cluster.go b/internal/testutil/acc/config_cluster.go 
index 2968356ff9..c21501224d 100644 --- a/internal/testutil/acc/config_cluster.go +++ b/internal/testutil/acc/config_cluster.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func ClusterDatasourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { @@ -45,7 +45,7 @@ func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceNa projectID := req.ProjectID req.AddDefaults() specRequests := req.ReplicationSpecs - specs := make([]admin.ReplicationSpec, len(specRequests)) + specs := make([]admin.ReplicationSpec20240805, len(specRequests)) for i := range specRequests { specRequest := specRequests[i] specs[i] = replicationSpec(&specRequest) @@ -119,7 +119,7 @@ func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceNa return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err } -func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error { +func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec20240805) error { replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body() err := addPrimitiveAttributesViaJSON(replicationBlock, spec) if err != nil { diff --git a/internal/testutil/acc/config_cluster_test.go b/internal/testutil/acc/config_cluster_test.go index 724e0ec8d6..306c0fc15d 100644 --- a/internal/testutil/acc/config_cluster_test.go +++ b/internal/testutil/acc/config_cluster_test.go @@ -18,8 +18,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 7 @@ -52,8 +51,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { } replication_specs { - num_shards = 1 - zone_name = "Zone X" + zone_name = "Zone X" region_configs { priority = 7 @@ 
-82,8 +80,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 7 @@ -111,8 +108,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 7 @@ -140,8 +136,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 7 @@ -157,8 +152,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { } } replication_specs { - num_shards = 1 - zone_name = "Zone 2" + zone_name = "Zone 2" region_configs { priority = 7 @@ -185,8 +179,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 7 @@ -227,8 +220,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 7 @@ -263,8 +255,7 @@ resource "mongodbatlas_advanced_cluster" "cluster_info" { project_id = "project" replication_specs { - num_shards = 1 - zone_name = "Zone 1" + zone_name = "Zone 1" region_configs { priority = 5 diff --git a/internal/testutil/acc/database_user.go b/internal/testutil/acc/database_user.go index 7710bb1333..4189186e73 100644 --- a/internal/testutil/acc/database_user.go +++ b/internal/testutil/acc/database_user.go @@ -3,7 +3,7 @@ package acc import ( "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func ConfigDatabaseUserBasic(projectID, username, roleName, keyLabel, valueLabel string) string { diff --git a/internal/testutil/acc/factory.go 
b/internal/testutil/acc/factory.go index 80b3fb63ea..608615d2ab 100644 --- a/internal/testutil/acc/factory.go +++ b/internal/testutil/acc/factory.go @@ -9,7 +9,8 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/provider" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( @@ -39,6 +40,10 @@ func ConnV2() *admin.APIClient { return MongoDBClient.AtlasV2 } +func ConnV220240530() *admin20240530.APIClient { + return MongoDBClient.AtlasV220240530 +} + func ConnV2UsingProxy(proxyPort *int) *admin.APIClient { cfg := config.Config{ PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), diff --git a/internal/testutil/acc/project.go b/internal/testutil/acc/project.go index 46e9bd01b7..1f4b1bbe35 100644 --- a/internal/testutil/acc/project.go +++ b/internal/testutil/acc/project.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func CheckDestroyProject(s *terraform.State) error { diff --git a/internal/testutil/acc/serverless.go b/internal/testutil/acc/serverless.go index d9c6501970..0453b5af57 100644 --- a/internal/testutil/acc/serverless.go +++ b/internal/testutil/acc/serverless.go @@ -3,7 +3,7 @@ package acc import ( "fmt" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func ConfigServerlessInstance(projectID, name string, ignoreConnectionStrings bool, autoIndexing *bool, tags []admin.ResourceTag) string { From 1065d78916f7d54aad0a081ab4c515d4ccc265cd Mon Sep 17 00:00:00 2001 From: Agustin Bettati Date: Sun, 11 Aug 2024 19:36:43 +0200 Subject: [PATCH 80/84] manual fixes of versions 
in advanced cluster, cloud backup schedule, and other small compilations --- docs/index.md | 227 ++++++++++++++++++ internal/common/conversion/flatten_expand.go | 26 -- .../data_source_advanced_cluster.go | 12 +- .../data_source_advanced_clusters.go | 24 +- .../advancedcluster/model_advanced_cluster.go | 117 +++++---- .../model_advanced_cluster_test.go | 82 +++---- .../model_sdk_version_conversion.go | 84 +++---- .../resource_advanced_cluster.go | 96 ++++---- .../resource_advanced_cluster_test.go | 30 +-- .../advancedcluster/resource_update_logic.go | 14 +- .../resource_update_logic_test.go | 46 ++-- .../data_source_cloud_backup_schedule.go | 14 +- .../model_cloud_backup_schedule.go | 12 +- .../model_cloud_backup_schedule_test.go | 12 +- .../model_sdk_version_conversion.go | 32 +-- .../resource_cloud_backup_schedule.go | 64 ++--- ...ce_cloud_backup_schedule_migration_test.go | 9 +- .../resource_cloud_backup_schedule_test.go | 60 ++--- ...resource_private_endpoint_regional_mode.go | 3 +- .../resource_privatelink_endpoint_service.go | 6 +- .../data_source_stream_connections_test.go | 2 +- internal/testutil/acc/advanced_cluster.go | 2 +- internal/testutil/acc/atlas.go | 4 +- internal/testutil/acc/factory.go | 5 - 24 files changed, 575 insertions(+), 408 deletions(-) diff --git a/docs/index.md b/docs/index.md index e69de29bb2..06f666ecc1 100644 --- a/docs/index.md +++ b/docs/index.md @@ -0,0 +1,227 @@ +# MongoDB Atlas Provider + +You can use the MongoDB Atlas provider to interact with the resources supported by [MongoDB Atlas](https://www.mongodb.com/cloud/atlas). +The provider needs to be configured with the proper credentials before it can be used. + +Use the navigation to the left to read about the available provider resources and data sources. + +You may want to consider pinning the [provider version](https://www.terraform.io/docs/configuration/providers.html#provider-versions) to ensure you have a chance to review and prepare for changes. 
+Speaking of changes, see [CHANGELOG](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/CHANGELOG.md) for current version information. + +## Example Usage + +```terraform +# Configure the MongoDB Atlas Provider +provider "mongodbatlas" { + public_key = var.mongodbatlas_public_key + private_key = var.mongodbatlas_private_key +} +# Create the resources +``` +## Configure Atlas Programmatic Access + +In order to set up authentication with the MongoDB Atlas provider, you must generate a programmatic API key for MongoDB Atlas with the appropriate [role](https://docs.atlas.mongodb.com/reference/user-roles/). +The [MongoDB Atlas documentation](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/index.html) contains the most up-to-date instructions for creating and managing your key(s), setting the appropriate role, and optionally configuring IP access. + +**Role**: If unsure of which role level to grant your key, we suggest creating an organization API Key with an Organization Owner role. This ensures that you have sufficient access for all actions. + +## Configure MongoDB Atlas for Government + +In order to enable the Terraform MongoDB Atlas Provider for use with MongoDB Atlas for Government add is_mongodbgov_cloud = true to your provider configuration: +```terraform +# Configure the MongoDB Atlas Provider for MongoDB Atlas for Government +provider "mongodbatlas" { + public_key = var.mongodbatlas_public_key + private_key = var.mongodbatlas_private_key + is_mongodbgov_cloud = true +} +# Create the resources +``` +Also see [`Atlas for Government Considerations`](https://www.mongodb.com/docs/atlas/government/api/#atlas-for-government-considerations). + +## Authenticate the Provider + +The MongoDB Atlas provider offers a flexible means of providing credentials for authentication. 
+You can use any the following methods: + +### Environment Variables + +You can also provide your credentials via the environment variables, +`MONGODB_ATLAS_PUBLIC_KEY` and `MONGODB_ATLAS_PRIVATE_KEY`, +for your public and private MongoDB Atlas programmatic API key pair respectively: + +```terraform +provider "mongodbatlas" {} +``` + +Usage (prefix the export commands with a space to avoid the keys being recorded in OS history): + +```shell +$ export MONGODB_ATLAS_PUBLIC_KEY="xxxx" +$ export MONGODB_ATLAS_PRIVATE_KEY="xxxx" +$ terraform plan +``` + +As an alternative to `MONGODB_ATLAS_PUBLIC_KEY` and `MONGODB_ATLAS_PRIVATE_KEY` +if you are using [MongoDB CLI](https://docs.mongodb.com/mongocli/stable/) +then `MCLI_PUBLIC_API_KEY` and `MCLI_PRIVATE_API_KEY` are also supported. + +### AWS Secrets Manager +AWS Secrets Manager (AWS SM) helps to manage, retrieve, and rotate database credentials, API keys, and other secrets throughout their lifecycles. See [product page](https://aws.amazon.com/secrets-manager/) and [documentation](https://docs.aws.amazon.com/systems-manager/latest/userguide/what-is-systems-manager.html) for more details. + +In order to enable the Terraform MongoDB Atlas Provider with AWS SM, please follow the below steps: + +1. Create Atlas API Keys and add them as one secret to AWS SM with a raw value. Take note of which AWS Region secret is being stored in. Public Key and Private Key each need to be entered as their own key value pair. See below example: +``` + { + "public_key": "secret1", + "private_key":"secret2" + } +``` +2. Create an AWS IAM Role to attach to the AWS STS (Security Token Service) generated short lived API keys. This is required since STS generated API Keys by default have restricted permissions and need to have their permissions elevated in order to authenticate with Terraform. Take note of Role ARN and ensure IAM Role has permission for “sts:AssumeRole”. 
For example: +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Statement1", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": "sts:AssumeRole" + } + ] +} +``` +In addition, you are required to also attach the AWS Managed policy of `SecretsManagerReadWrite` to this IAM role. + +Note: this policy may be overly broad for many use cases, feel free to adjust accordingly to your organization's needs. + +3. In terminal, store as environmental variables AWS API Keys (while you can also hardcode in config files these will then be stored as plain text in .tfstate file and should be avoided if possible). For example: +``` +export AWS_ACCESS_KEY_ID="secret" +export AWS_SECRET_ACCESS_KEY="secret” +``` +4. In terminal, use the AWS CLI command: `aws sts assume-role --role-arn ROLE_ARN_FROM_ABOVE --role-session-name newSession` + +Note: AWS STS secrets are short lived by default, use the ` --duration-seconds` flag to specify longer duration as needed + +5. Store each of the 3 new created secrets from AWS STS as environment variables (hardcoding secrets into config file with additional risk is also supported). For example: +``` +export AWS_ACCESS_KEY_ID="ASIAYBYSK3S5FZEKLETV" +export AWS_SECRET_ACCESS_KEY="lgT6kL9lr1fxM6mCEwJ33MeoJ1M6lIzgsiW23FGH" +export AWS_SESSION_TOKEN="IQoXX3+Q" +``` + +6. Add assume_role block with `role_arn`, `secret_name`, and AWS `region` where secret is stored as part of AWS SM. Each of these 3 fields are REQUIRED. 
For example: +```terraform +# Configure the MongoDB Atlas Provider to Authenticate with AWS Secrets Manager +provider "mongodbatlas" { + assume_role { + role_arn = "arn:aws:iam::476xxx451:role/mdbsts" + } + secret_name = "mongodbsecret" + // fully qualified secret_name ARN also supported as input "arn:aws:secretsmanager:af-south-1:553552370874:secret:test789-TO06Hy" + region = "us-east-2" + + aws_access_key_id = "ASIXXBNEK" + aws_secret_access_key = "ZUZgVb8XYZWEXXEDURGFHFc5Au" + aws_session_token = "IQoXX3+Q=" + sts_endpoint = "https://sts.us-east-2.amazonaws.com/" +} +``` +Note: `aws_access_key_id`, `aws_secret_access_key`, and `aws_session_token` can also be passed in using environment variables i.e. aws_access_key_id will accept AWS_ACCESS_KEY_ID and TF_VAR_AWS_ACCESS_KEY_ID as a default value in place of value in a terraform file variable. + +Note: Fully qualified `secret_name` ARN as input is REQUIRED for cross-AWS account secrets. For more detatils see: +* https://aws.amazon.com/blogs/security/how-to-access-secrets-across-aws-accounts-by-attaching-resource-based-policies/ +* https://aws.amazon.com/premiumsupport/knowledge-center/secrets-manager-share-between-accounts/ + +Note: `sts_endpoint` parameter is REQUIRED for cross-AWS region or cross-AWS account secrets. + +7. In terminal, `terraform init` + +### Static Credentials + +Static credentials can be provided by adding the following attributes in-line in the MongoDB Atlas provider block, +either directly or via input variable/local value: + +```terraform +provider "mongodbatlas" { + public_key = "atlas_public_api_key" #required + private_key = "atlas_private_api_key" #required +} +``` + +~> *IMPORTANT* Hard-coding your MongoDB Atlas programmatic API key pair into a Terraform configuration is not recommended. +Consider the risks, especially the inadvertent submission of a configuration file containing secrets to a public repository. 
+ +## Argument Reference + +In addition to [generic `provider` arguments](https://www.terraform.io/docs/configuration/providers.html) +(e.g. `alias` and `version`), the MongoDB Atlas `provider` supports the following arguments: + +* `public_key` - (Optional) This is the public key of your MongoDB Atlas API key pair. It must be + provided, but it can also be sourced from the `MONGODB_ATLAS_PUBLIC_KEY` or `MCLI_PUBLIC_API_KEY` + environment variable. + +* `private_key` - (Optional) This is the private key of your MongoDB Atlas key pair. It must be + provided, but it can also be sourced from the `MONGODB_ATLAS_PRIVATE_KEY` or `MCLI_PRIVATE_API_KEY` + environment variable. + +For more information on configuring and managing programmatic API Keys see the [MongoDB Atlas Documentation](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/index.html). + +## [HashiCorp Terraform Version](https://www.terraform.io/downloads.html) Compatibility Matrix + + + +| HashiCorp Terraform Release | HashiCorp Terraform Release Date | HashiCorp Terraform Full Support End Date | MongoDB Atlas Support End Date | +|:-------:|:------------:|:------------:|:------------:| +| 1.9.x | 2024-06-26 | 2026-06-30 | 2026-06-30 | +| 1.8.x | 2024-04-10 | 2026-04-30 | 2026-04-30 | +| 1.7.x | 2024-01-17 | 2026-01-31 | 2026-01-31 | +| 1.6.x | 2023-10-04 | 2025-10-31 | 2025-10-31 | +| 1.5.x | 2023-06-12 | 2025-06-30 | 2025-06-30 | +| 1.4.x | 2023-03-08 | 2025-03-31 | 2025-03-31 | +| 1.3.x | 2022-09-21 | 2024-09-30 | 2024-09-30 | + +For the safety of our users, we require only consuming versions of HashiCorp Terraform that are currently receiving Security / Maintenance Updates. For more details see [Support Period and End-of-Life (EOL) Policy](https://support.hashicorp.com/hc/en-us/articles/360021185113-Support-Period-and-End-of-Life-EOL-Policy). + +HashiCorp Terraform versions that are not listed on this table are no longer supported by MongoDB Atlas. 
For latest HashiCorp Terraform versions see [here](https://endoflife.date/terraform ). + +## Supported OS and Architectures +As per [HashiCorp's recommendations](https://developer.hashicorp.com/terraform/registry/providers/os-arch), we fully support the following operating system / architecture combinations: +- Darwin / AMD64 +- Darwin / ARMv8 +- Linux / AMD64 +- Linux / ARMv8 (sometimes referred to as AArch64 or ARM64) +- Linux / ARMv6 +- Windows / AMD64 + +We ship binaries but do not prioritize fixes for the following operating system / architecture combinations: +- Linux / 386 +- Windows / 386 +- FreeBSD / 386 +- FreeBSD / AMD64 + +## Helpful Links/Information + +[Upgrade Guide for Terraform MongoDB Atlas 0.4.0](https://www.mongodb.com/blog/post/upgrade-guide-for-terraform-mongodb-atlas-040) + +[MongoDB Atlas and Terraform Landing Page](https://www.mongodb.com/atlas/terraform) + +[Report bugs](https://github.com/mongodb/terraform-provider-mongodbatlas/issues) + +[Request Features](https://feedback.mongodb.com/forums/924145-atlas?category_id=370723) + +[Support covered by MongoDB Atlas support plans, Developer and above](https://docs.atlas.mongodb.com/support/) + +## Examples from MongoDB and the Community + + +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.6/examples) +in our GitHub repo that will help both beginner and more advanced users. + +Have a good example you've created and want to share? +Let us know the details via an [issue](https://github.com/mongodb/terraform-provider-mongodbatlas/issues) +or submit a PR of your work to add it to the `examples` directory in our [GitHub repo](https://github.com/mongodb/terraform-provider-mongodbatlas/). 
diff --git a/internal/common/conversion/flatten_expand.go b/internal/common/conversion/flatten_expand.go index 14148596e3..e97b4450bc 100644 --- a/internal/common/conversion/flatten_expand.go +++ b/internal/common/conversion/flatten_expand.go @@ -3,7 +3,6 @@ package conversion import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" "go.mongodb.org/atlas-sdk/v20240805001/admin" ) @@ -29,17 +28,6 @@ func FlattenTags(tags []admin.ResourceTag) []map[string]string { return ret } -func FlattenTagsOldSDK(tags []admin20240530.ResourceTag) []map[string]string { - ret := make([]map[string]string, len(tags)) - for i, tag := range tags { - ret[i] = map[string]string{ - "key": tag.GetKey(), - "value": tag.GetValue(), - } - } - return ret -} - func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { list := d.Get("tags").(*schema.Set) ret := make([]admin.ResourceTag, list.Len()) @@ -53,20 +41,6 @@ func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { return &ret } -// this will be removed once ISS dev branch is merged -func ExpandTagsFromSetSchemaOldSDK(d *schema.ResourceData) *[]admin20240530.ResourceTag { - list := d.Get("tags").(*schema.Set) - ret := make([]admin20240530.ResourceTag, list.Len()) - for i, item := range list.List() { - tag := item.(map[string]any) - ret[i] = admin20240530.ResourceTag{ - Key: tag["key"].(string), - Value: tag["value"].(string), - } - } - return &ret -} - func ExpandStringList(list []any) (res []string) { for _, v := range list { res = append(res, v.(string)) diff --git a/internal/service/advancedcluster/data_source_advanced_cluster.go b/internal/service/advancedcluster/data_source_advanced_cluster.go index 0bc3a5555d..082fdc113b 100644 --- a/internal/service/advancedcluster/data_source_advanced_cluster.go +++ b/internal/service/advancedcluster/data_source_advanced_cluster.go @@ -5,7 +5,7 @@ import ( "fmt" "net/http" - 
admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -251,7 +251,7 @@ func DataSource() *schema.Resource { } func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) @@ -265,13 +265,13 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. } if !useReplicationSpecPerShard { - clusterDescOld, resp, err := connV220231115.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + clusterDescOld, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { if resp != nil { if resp.StatusCode == http.StatusNotFound { return nil } - if admin20231115.IsErrorCode(err, "ASYMMETRIC_SHARD_UNSUPPORTED") { + if admin20240530.IsErrorCode(err, "ASYMMETRIC_SHARD_UNSUPPORTED") { return diag.FromErr(fmt.Errorf("please add `use_replication_spec_per_shard = true` to your data source configuration to enable asymmetric shard support. Refer to documentation for more details. %s", err)) } } @@ -314,7 +314,7 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) } - zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220231115) + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) if err != nil { return diag.FromErr(err) } @@ -334,7 +334,7 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } - processArgs, _, err := connV220231115.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { return diag.FromErr(fmt.Errorf(ErrorAdvancedConfRead, clusterName, err)) } diff --git a/internal/service/advancedcluster/data_source_advanced_clusters.go b/internal/service/advancedcluster/data_source_advanced_clusters.go index 6f26126ac9..edd7b3e869 100644 --- a/internal/service/advancedcluster/data_source_advanced_clusters.go +++ b/internal/service/advancedcluster/data_source_advanced_clusters.go @@ -6,8 +6,8 @@ import ( "log" "net/http" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -267,7 +267,7 @@ func PluralDataSource() *schema.Resource { } func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) useReplicationSpecPerShard := false @@ -279,14 +279,14 @@ func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) } if !useReplicationSpecPerShard { - list, resp, err := connV220231115.ClustersApi.ListClusters(ctx, projectID).Execute() + list, resp, err := connV220240530.ClustersApi.ListClusters(ctx, projectID).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { return nil } return 
diag.FromErr(fmt.Errorf(errorListRead, projectID, err)) } - results, diags := flattenAdvancedClustersOldSDK(ctx, connV220231115, connV2, list.GetResults(), d) + results, diags := flattenAdvancedClustersOldSDK(ctx, connV220240530, connV2, list.GetResults(), d) if len(diags) > 0 { return diags } @@ -301,7 +301,7 @@ func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) } return diag.FromErr(fmt.Errorf(errorListRead, projectID, err)) } - results, diags := flattenAdvancedClusters(ctx, connV220231115, connV2, list.GetResults(), d) + results, diags := flattenAdvancedClusters(ctx, connV220240530, connV2, list.GetResults(), d) if len(diags) > 0 { return diags } @@ -312,16 +312,16 @@ func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) return nil } -func flattenAdvancedClusters(ctx context.Context, connV220231115 *admin20231115.APIClient, connV2 *admin.APIClient, clusters []admin.ClusterDescription20250101, d *schema.ResourceData) ([]map[string]any, diag.Diagnostics) { +func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530.APIClient, connV2 *admin.APIClient, clusters []admin.ClusterDescription20240805, d *schema.ResourceData) ([]map[string]any, diag.Diagnostics) { results := make([]map[string]any, 0, len(clusters)) for i := range clusters { cluster := &clusters[i] - processArgs, _, err := connV220231115.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() if err != nil { log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", cluster.GetId(), err) } - zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, cluster.GetGroupId(), cluster.GetName(), connV220231115) + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, cluster.GetGroupId(), 
cluster.GetName(), connV220240530) if err != nil { return nil, diag.FromErr(err) } @@ -359,16 +359,16 @@ func flattenAdvancedClusters(ctx context.Context, connV220231115 *admin20231115. return results, nil } -func flattenAdvancedClustersOldSDK(ctx context.Context, connV220231115 *admin20231115.APIClient, connV2 *admin.APIClient, clusters []admin20231115.AdvancedClusterDescription, d *schema.ResourceData) ([]map[string]any, diag.Diagnostics) { +func flattenAdvancedClustersOldSDK(ctx context.Context, connV20240530 *admin20240530.APIClient, connV2 *admin.APIClient, clusters []admin20240530.AdvancedClusterDescription, d *schema.ResourceData) ([]map[string]any, diag.Diagnostics) { results := make([]map[string]any, 0, len(clusters)) for i := range clusters { cluster := &clusters[i] - processArgs, _, err := connV220231115.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() + processArgs, _, err := connV20240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() if err != nil { log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", cluster.GetId(), err) } - zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, cluster.GetGroupId(), cluster.GetName(), connV220231115) + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, cluster.GetGroupId(), cluster.GetName(), connV20240530) if err != nil { return nil, diag.FromErr(err) } diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 4bf3d1a3f5..c02c7d9f03 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -9,8 +9,8 @@ import ( "slices" "strings" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 
"go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -284,7 +284,7 @@ func IsSharedTier(instanceSize string) bool { // GetDiskSizeGBFromReplicationSpec obtains the diskSizeGB value by looking into the electable spec of the first replication spec. // Independent storage size scaling is not supported (CLOUDP-201331), meaning all electable/analytics/readOnly configs in all replication specs are the same. -func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20250101) float64 { +func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20240805) float64 { specs := cluster.GetReplicationSpecs() if len(specs) < 1 { return 0 @@ -321,7 +321,7 @@ func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admi } } -func ResourceClusterListAdvancedRefreshFunc(ctx context.Context, projectID string, clustersAPI admin20240530.ClustersApi) retry.StateRefreshFunc { +func ResourceClusterListAdvancedRefreshFunc(ctx context.Context, projectID string, clustersAPI admin.ClustersApi) retry.StateRefreshFunc { return func() (any, string, error) { clusters, resp, err := clustersAPI.ListClusters(ctx, projectID).Execute() @@ -360,7 +360,7 @@ func FormatMongoDBMajorVersion(val any) string { return fmt.Sprintf("%.1f", cast.ToFloat32(val)) } -func flattenLabels(l []admin20240530.ComponentLabel) []map[string]string { +func flattenLabels(l []admin.ComponentLabel) []map[string]string { labels := make([]map[string]string, 0, len(l)) for _, item := range l { if item.GetKey() == ignoreLabel { @@ -398,7 +398,7 @@ func flattenConnectionStrings(str admin.ClusterConnectionStrings) []map[string]a } } -func flattenPrivateEndpoint(privateEndpoints []admin20240530.ClusterDescriptionConnectionStringsPrivateEndpoint) []map[string]any { +func flattenPrivateEndpoint(privateEndpoints 
[]admin.ClusterDescriptionConnectionStringsPrivateEndpoint) []map[string]any { endpoints := make([]map[string]any, 0, len(privateEndpoints)) for _, endpoint := range privateEndpoints { endpoints = append(endpoints, map[string]any{ @@ -412,7 +412,7 @@ func flattenPrivateEndpoint(privateEndpoints []admin20240530.ClusterDescriptionC return endpoints } -func flattenEndpoints(listEndpoints []admin20240530.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) []map[string]any { +func flattenEndpoints(listEndpoints []admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) []map[string]any { endpoints := make([]map[string]any, 0, len(listEndpoints)) for _, endpoint := range listEndpoints { endpoints = append(endpoints, map[string]any{ @@ -433,11 +433,11 @@ func flattenBiConnectorConfig(biConnector *admin.BiConnector) []map[string]any { } } -func expandBiConnectorConfig(d *schema.ResourceData) *admin20240530.BiConnector { +func expandBiConnectorConfig(d *schema.ResourceData) *admin.BiConnector { if v, ok := d.GetOk("bi_connector_config"); ok { if biConn := v.([]any); len(biConn) > 0 { biConnMap := biConn[0].(map[string]any) - return &admin20240530.BiConnector{ + return &admin.BiConnector{ Enabled: conversion.Pointer(cast.ToBool(biConnMap["enabled"])), ReadPreference: conversion.StringPtr(cast.ToString(biConnMap["read_preference"])), } @@ -446,7 +446,7 @@ func expandBiConnectorConfig(d *schema.ResourceData) *admin20240530.BiConnector return nil } -func flattenProcessArgs(p *admin20231115.ClusterDescriptionProcessArgs) []map[string]any { +func flattenProcessArgs(p *admin20240530.ClusterDescriptionProcessArgs) []map[string]any { if p == nil { return nil } @@ -467,28 +467,28 @@ func flattenProcessArgs(p *admin20231115.ClusterDescriptionProcessArgs) []map[st } } -func FlattenAdvancedReplicationSpecsOldSDK(ctx context.Context, apiObjects []admin20231115.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObjects []any, +func 
FlattenAdvancedReplicationSpecsOldSDK(ctx context.Context, apiObjects []admin20240530.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObjects []any, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { // for flattening old model we need information of value defined at root disk_size_gb so we set the value in new location under hardware specs - replicationSpecFlattener := func(ctx context.Context, sdkModel *admin20231115.ReplicationSpec, tfModel map[string]any, resourceData *schema.ResourceData, client *admin.APIClient) (map[string]any, error) { + replicationSpecFlattener := func(ctx context.Context, sdkModel *admin20240530.ReplicationSpec, tfModel map[string]any, resourceData *schema.ResourceData, client *admin.APIClient) (map[string]any, error) { return flattenAdvancedReplicationSpecOldSDK(ctx, sdkModel, zoneNameToZoneIDs, rootDiskSizeGB, tfModel, resourceData, connV2) } - return flattenAdvancedReplicationSpecsLogic[admin20231115.ReplicationSpec](ctx, apiObjects, tfMapObjects, d, + return flattenAdvancedReplicationSpecsLogic[admin20240530.ReplicationSpec](ctx, apiObjects, tfMapObjects, d, doesAdvancedReplicationSpecMatchAPIOldSDK, replicationSpecFlattener, connV2) } -func flattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.ReplicationSpec20250101, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObjects []any, +func flattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObjects []any, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { // for flattening new model we need information of replication spec ids associated to old API to avoid breaking changes for users referencing replication_specs.*.id - replicationSpecFlattener := func(ctx context.Context, sdkModel *admin.ReplicationSpec20250101, tfModel map[string]any, resourceData *schema.ResourceData, client 
*admin.APIClient) (map[string]any, error) { + replicationSpecFlattener := func(ctx context.Context, sdkModel *admin.ReplicationSpec20240805, tfModel map[string]any, resourceData *schema.ResourceData, client *admin.APIClient) (map[string]any, error) { return flattenAdvancedReplicationSpec(ctx, sdkModel, zoneNameToOldReplicationSpecIDs, tfModel, resourceData, connV2) } - return flattenAdvancedReplicationSpecsLogic[admin.ReplicationSpec20250101](ctx, apiObjects, tfMapObjects, d, + return flattenAdvancedReplicationSpecsLogic[admin.ReplicationSpec20240805](ctx, apiObjects, tfMapObjects, d, doesAdvancedReplicationSpecMatchAPI, replicationSpecFlattener, connV2) } type ReplicationSpecSDKModel interface { - admin20231115.ReplicationSpec | admin.ReplicationSpec20250101 + admin20240530.ReplicationSpec | admin.ReplicationSpec20240805 } func flattenAdvancedReplicationSpecsLogic[T ReplicationSpecSDKModel]( @@ -552,15 +552,15 @@ func flattenAdvancedReplicationSpecsLogic[T ReplicationSpecSDKModel]( return tfList, nil } -func doesAdvancedReplicationSpecMatchAPIOldSDK(tfObject map[string]any, apiObject *admin20231115.ReplicationSpec) bool { +func doesAdvancedReplicationSpecMatchAPIOldSDK(tfObject map[string]any, apiObject *admin20240530.ReplicationSpec) bool { return tfObject["id"] == apiObject.GetId() || (tfObject["id"] == nil && tfObject["zone_name"] == apiObject.GetZoneName()) } -func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin.ReplicationSpec20250101) bool { +func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin.ReplicationSpec20240805) bool { return tfObject["external_id"] == apiObject.GetId() } -func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []admin.CloudRegionConfig20250101, tfMapObjects []any, +func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []admin.CloudRegionConfig20240805, tfMapObjects []any, d *schema.ResourceData, connV2 *admin.APIClient) 
(tfResult []map[string]any, containersIDs map[string]string, err error) { if len(apiObjects) == 0 { return nil, nil, nil @@ -579,11 +579,11 @@ func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects } if apiObject.GetProviderName() != "TENANT" { - params := &admin20240530.ListPeeringContainerByCloudProviderApiParams{ + params := &admin.ListPeeringContainerByCloudProviderApiParams{ GroupId: d.Get("project_id").(string), ProviderName: apiObject.ProviderName, } - containers, _, err := connV220240530.NetworkPeeringApi.ListPeeringContainerByCloudProviderWithParams(ctx, params).Execute() + containers, _, err := connV2.NetworkPeeringApi.ListPeeringContainerByCloudProviderWithParams(ctx, params).Execute() if err != nil { return nil, nil, err } @@ -596,7 +596,7 @@ func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects return tfList, containerIDs, nil } -func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin.CloudRegionConfig20250101, tfMapObject map[string]any) map[string]any { +func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin.CloudRegionConfig20240805, tfMapObject map[string]any) map[string]any { if apiObject == nil { return nil } @@ -634,11 +634,11 @@ func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin.CloudRegionConf return tfMap } -func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec20250101) *admin.DedicatedHardwareSpec20250101 { +func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec20240805) *admin.DedicatedHardwareSpec20240805 { if apiObject == nil { return nil } - return &admin.DedicatedHardwareSpec20250101{ + return &admin.DedicatedHardwareSpec20240805{ NodeCount: apiObject.NodeCount, DiskIOPS: apiObject.DiskIOPS, EbsVolumeType: apiObject.EbsVolumeType, @@ -647,11 +647,11 @@ func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec20250101) *admin.Dedic } } -func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec20250101) *admin.HardwareSpec20250101 { 
+func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec20240805) *admin.HardwareSpec20240805 { if apiObject == nil { return nil } - return &admin.HardwareSpec20250101{ + return &admin.HardwareSpec20240805{ DiskSizeGB: apiObject.DiskSizeGB, NodeCount: apiObject.NodeCount, DiskIOPS: apiObject.DiskIOPS, @@ -660,7 +660,7 @@ func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec20250101) *ad } } -func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHardwareSpec20250101, providerName string, tfMapObjects []any) []map[string]any { +func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHardwareSpec20240805, providerName string, tfMapObjects []any) []map[string]any { if apiObject == nil { return nil } @@ -671,14 +671,11 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHa if len(tfMapObjects) > 0 { tfMapObject := tfMapObjects[0].(map[string]any) - if providerName == constant.AWS || providerName == constant.AZURE { if providerName == constant.AWS || providerName == constant.AZURE { if cast.ToInt64(apiObject.GetDiskIOPS()) > 0 { tfMap["disk_iops"] = apiObject.GetDiskIOPS() } } - if providerName == constant.AWS { - } if providerName == constant.AWS { if v, ok := tfMapObject["ebs_volume_type"]; ok && v.(string) != "" { tfMap["ebs_volume_type"] = apiObject.GetEbsVolumeType() @@ -705,7 +702,7 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHa return tfList } -func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin20240530.AdvancedAutoScalingSettings) []map[string]any { +func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin.AdvancedAutoScalingSettings) []map[string]any { if apiObject == nil { return nil } @@ -724,7 +721,7 @@ func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin20240530.Advanced return tfList } -func getAdvancedClusterContainerID(containers []admin.CloudProviderContainer, cluster 
*admin.CloudRegionConfig20250101) string { +func getAdvancedClusterContainerID(containers []admin.CloudProviderContainer, cluster *admin.CloudRegionConfig20240805) string { if len(containers) == 0 { return "" } @@ -741,8 +738,8 @@ func getAdvancedClusterContainerID(containers []admin.CloudProviderContainer, cl return "" } -func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin20231115.ClusterDescriptionProcessArgs { - res := admin20231115.ClusterDescriptionProcessArgs{} +func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin20240530.ClusterDescriptionProcessArgs { + res := admin20240530.ClusterDescriptionProcessArgs{} if _, ok := d.GetOkExists("advanced_configuration.0.default_read_concern"); ok { res.DefaultReadConcern = conversion.StringPtr(cast.ToString(p["default_read_concern"])) @@ -802,16 +799,16 @@ func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin20231115.C return res } -func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240530.ComponentLabel, diag.Diagnostics) { +func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin.ComponentLabel, diag.Diagnostics) { list := d.Get("labels").(*schema.Set) - res := make([]admin20240530.ComponentLabel, list.Len()) + res := make([]admin.ComponentLabel, list.Len()) for i, val := range list.List() { v := val.(map[string]any) key := v["key"].(string) if key == ignoreLabel { return nil, diag.FromErr(fmt.Errorf("you should not set `Infrastructure Tool` label, it is used for internal purposes")) } - res[i] = admin20240530.ComponentLabel{ + res[i] = admin.ComponentLabel{ Key: conversion.StringPtr(key), Value: conversion.StringPtr(v["value"].(string)), } @@ -819,8 +816,8 @@ func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240530.Comp return res, nil } -func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]admin.ReplicationSpec20250101 { - var apiObjects []admin.ReplicationSpec20250101 +func 
expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]admin.ReplicationSpec20240805 { + var apiObjects []admin.ReplicationSpec20240805 for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -841,8 +838,8 @@ func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]ad return &apiObjects } -func expandAdvancedReplicationSpecsOldSDK(tfList []any) *[]admin20231115.ReplicationSpec { - var apiObjects []admin20231115.ReplicationSpec +func expandAdvancedReplicationSpecsOldSDK(tfList []any) *[]admin20240530.ReplicationSpec { + var apiObjects []admin20240530.ReplicationSpec for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -857,8 +854,8 @@ func expandAdvancedReplicationSpecsOldSDK(tfList []any) *[]admin20231115.Replica return &apiObjects } -func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64) *admin.ReplicationSpec20250101 { - apiObject := &admin.ReplicationSpec20250101{ +func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64) *admin.ReplicationSpec20240805 { + apiObject := &admin.ReplicationSpec20240805{ ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), RegionConfigs: expandRegionConfigs(tfMap["region_configs"].([]any), rootDiskSizeGB), } @@ -868,8 +865,8 @@ func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64 return apiObject } -func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20231115.ReplicationSpec { - apiObject := &admin20231115.ReplicationSpec{ +func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20240530.ReplicationSpec { + apiObject := &admin20240530.ReplicationSpec{ NumShards: conversion.Pointer(tfMap["num_shards"].(int)), ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), RegionConfigs: convertRegionConfigSliceToOldSDK(expandRegionConfigs(tfMap["region_configs"].([]any), nil)), @@ -880,8 
+877,8 @@ func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20231115.Re return apiObject } -func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin.CloudRegionConfig20250101 { - var apiObjects []admin.CloudRegionConfig20250101 +func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin.CloudRegionConfig20240805 { + var apiObjects []admin.CloudRegionConfig20240805 for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { @@ -896,9 +893,9 @@ func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin.CloudRe return &apiObjects } -func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.CloudRegionConfig20250101 { +func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.CloudRegionConfig20240805 { providerName := tfMap["provider_name"].(string) - apiObject := &admin.CloudRegionConfig20250101{ + apiObject := &admin.CloudRegionConfig20240805{ Priority: conversion.Pointer(cast.ToInt(tfMap["priority"])), ProviderName: conversion.StringPtr(providerName), RegionName: conversion.StringPtr(tfMap["region_name"].(string)), @@ -925,16 +922,14 @@ func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.Cl return apiObject } -func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *float64) *admin.DedicatedHardwareSpec20250101 { +func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *float64) *admin.DedicatedHardwareSpec20240805 { tfMap, _ := tfList[0].(map[string]any) - apiObject := new(admin.DedicatedHardwareSpec20250101) + apiObject := new(admin.DedicatedHardwareSpec20240805) if providerName == constant.AWS || providerName == constant.AZURE { if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { apiObject.DiskIOPS = conversion.Pointer(v.(int)) } } - if providerName == constant.AWS { - } if providerName == constant.AWS { if v, ok := tfMap["ebs_volume_type"]; ok { 
apiObject.EbsVolumeType = conversion.StringPtr(v.(string)) @@ -959,11 +954,11 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f return apiObject } -func expandRegionConfigAutoScaling(tfList []any) *admin20240530.AdvancedAutoScalingSettings { +func expandRegionConfigAutoScaling(tfList []any) *admin.AdvancedAutoScalingSettings { tfMap, _ := tfList[0].(map[string]any) - settings := admin20240530.AdvancedAutoScalingSettings{ - DiskGB: new(admin20240530.DiskGBAutoScaling), - Compute: new(admin20240530.AdvancedComputeAutoScaling), + settings := admin.AdvancedAutoScalingSettings{ + DiskGB: new(admin.DiskGBAutoScaling), + Compute: new(admin.AdvancedComputeAutoScaling), } if v, ok := tfMap["disk_gb_enabled"]; ok { @@ -990,7 +985,7 @@ func expandRegionConfigAutoScaling(tfList []any) *admin20240530.AdvancedAutoScal return &settings } -func flattenAdvancedReplicationSpecsDS(ctx context.Context, apiRepSpecs []admin.ReplicationSpec20250101, zoneNameToOldReplicationSpecIDs map[string]string, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { +func flattenAdvancedReplicationSpecsDS(ctx context.Context, apiRepSpecs []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { if len(apiRepSpecs) == 0 { return nil, nil } @@ -1007,7 +1002,7 @@ func flattenAdvancedReplicationSpecsDS(ctx context.Context, apiRepSpecs []admin. 
return tfList, nil } -func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.ReplicationSpec20250101, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObject map[string]any, +func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObject map[string]any, d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { if apiObject == nil { return nil, nil @@ -1044,7 +1039,7 @@ func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.Replic return tfMap, nil } -func flattenAdvancedReplicationSpecOldSDK(ctx context.Context, apiObject *admin20231115.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObject map[string]any, +func flattenAdvancedReplicationSpecOldSDK(ctx context.Context, apiObject *admin20240530.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObject map[string]any, d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { if apiObject == nil { return nil, nil diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go b/internal/service/advancedcluster/model_advanced_cluster_test.go index aa6156f73f..5f66222a7f 100644 --- a/internal/service/advancedcluster/model_advanced_cluster_test.go +++ b/internal/service/advancedcluster/model_advanced_cluster_test.go @@ -7,10 +7,10 @@ import ( "net/http" "testing" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" - "go.mongodb.org/atlas-sdk/v20240530002/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" @@ -25,7 +25,7 @@ var ( dummyClusterName = "clusterName" dummyProjectID = "projectId" 
errGeneric = errors.New("generic") - advancedClusters = []admin.ClusterDescription20250101{{StateName: conversion.StringPtr("NOT IDLE")}} + advancedClusters = []admin.ClusterDescription20240805{{StateName: conversion.StringPtr("NOT IDLE")}} ) func TestFlattenReplicationSpecs(t *testing.T) { @@ -36,7 +36,7 @@ func TestFlattenReplicationSpecs(t *testing.T) { unexpectedID = "id2" expectedZoneName = "z1" unexpectedZoneName = "z2" - regionConfigAdmin = []admin20231115.CloudRegionConfig{{ + regionConfigAdmin = []admin20240530.CloudRegionConfig{{ ProviderName: &providerName, RegionName: ®ionName, }} @@ -49,8 +49,8 @@ func TestFlattenReplicationSpecs(t *testing.T) { "region_name": regionName, "zone_name": unexpectedZoneName, } - apiSpecExpected = admin20231115.ReplicationSpec{Id: &expectedID, ZoneName: &expectedZoneName, RegionConfigs: ®ionConfigAdmin} - apiSpecDifferent = admin20231115.ReplicationSpec{Id: &unexpectedID, ZoneName: &unexpectedZoneName, RegionConfigs: ®ionConfigAdmin} + apiSpecExpected = admin20240530.ReplicationSpec{Id: &expectedID, ZoneName: &expectedZoneName, RegionConfigs: ®ionConfigAdmin} + apiSpecDifferent = admin20240530.ReplicationSpec{Id: &unexpectedID, ZoneName: &unexpectedZoneName, RegionConfigs: ®ionConfigAdmin} testSchema = map[string]*schema.Schema{ "project_id": {Type: schema.TypeString}, } @@ -80,60 +80,60 @@ func TestFlattenReplicationSpecs(t *testing.T) { } ) testCases := map[string]struct { - adminSpecs []admin20231115.ReplicationSpec + adminSpecs []admin20240530.ReplicationSpec tfInputSpecs []any expectedLen int }{ "empty admin spec should return empty list": { - []admin20231115.ReplicationSpec{}, + []admin20240530.ReplicationSpec{}, []any{tfSameIDSameZone}, 0, }, "existing id, should match admin": { - []admin20231115.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfSameIDSameZone}, 1, }, "existing different id, should change to admin spec": { - []admin20231115.ReplicationSpec{apiSpecExpected}, 
+ []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfdiffIDDiffZone}, 1, }, "missing id, should be set when zone_name matches": { - []admin20231115.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfNoIDSameZone}, 1, }, "missing id and diff zone, should change to admin spec": { - []admin20231115.ReplicationSpec{apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecExpected}, []any{tfNoIDDiffZone}, 1, }, "existing id, should match correct api spec using `id` and extra api spec added": { - []admin20231115.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, []any{tfSameIDSameZone}, 2, }, "missing id, should match correct api spec using `zone_name` and extra api spec added": { - []admin20231115.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, + []admin20240530.ReplicationSpec{apiSpecDifferent, apiSpecExpected}, []any{tfNoIDSameZone}, 2, }, "two matching specs should be set to api specs": { - []admin20231115.ReplicationSpec{apiSpecExpected, apiSpecDifferent}, + []admin20240530.ReplicationSpec{apiSpecExpected, apiSpecDifferent}, []any{tfSameIDSameZone, tfdiffIDDiffZone}, 2, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - peeringAPI := mockadmin20240530.NetworkPeeringApi{} + peeringAPI := mockadmin.NetworkPeeringApi{} - peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, mock.Anything).Return(admin20240530.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) - containerResult := []admin20240530.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} - peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin20240530.PaginatedCloudProviderContainer{Results: &containerResult}, nil, nil) + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, 
mock.Anything).Return(admin.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) + containerResult := []admin.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin.PaginatedCloudProviderContainer{Results: &containerResult}, nil, nil) - client := &admin20240530.APIClient{ + client := &admin.APIClient{ NetworkPeeringApi: &peeringAPI, } resourceData := schema.TestResourceDataRaw(t, testSchema, map[string]any{"project_id": "p1"}) @@ -154,14 +154,14 @@ func TestGetDiskSizeGBFromReplicationSpec(t *testing.T) { diskSizeGBValue := 40.0 testCases := map[string]struct { - clusterDescription admin.ClusterDescription20250101 + clusterDescription admin.ClusterDescription20240805 expectedDiskSizeResult float64 }{ "cluster description with disk size gb value at electable spec": { - clusterDescription: admin.ClusterDescription20250101{ - ReplicationSpecs: &[]admin.ReplicationSpec20250101{{ - RegionConfigs: &[]admin.CloudRegionConfig20250101{{ - ElectableSpecs: &admin.HardwareSpec20250101{ + clusterDescription: admin.ClusterDescription20240805{ + ReplicationSpecs: &[]admin.ReplicationSpec20240805{{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{{ + ElectableSpecs: &admin.HardwareSpec20240805{ DiskSizeGB: admin.PtrFloat64(diskSizeGBValue), }, }}, @@ -170,15 +170,15 @@ func TestGetDiskSizeGBFromReplicationSpec(t *testing.T) { expectedDiskSizeResult: diskSizeGBValue, }, "cluster description with no electable spec": { - clusterDescription: admin.ClusterDescription20250101{ - ReplicationSpecs: &[]admin.ReplicationSpec20250101{ - {RegionConfigs: &[]admin.CloudRegionConfig20250101{{}}}, + clusterDescription: admin.ClusterDescription20240805{ + ReplicationSpecs: &[]admin.ReplicationSpec20240805{ + {RegionConfigs: &[]admin.CloudRegionConfig20240805{{}}}, }, }, expectedDiskSizeResult: 0, }, "cluster description with no 
replication spec": { - clusterDescription: admin.ClusterDescription20250101{}, + clusterDescription: admin.ClusterDescription20240805{}, expectedDiskSizeResult: 0, }, } @@ -198,7 +198,7 @@ type Result struct { func TestUpgradeRefreshFunc(t *testing.T) { testCases := []struct { - mockCluster *admin.ClusterDescription20250101 + mockCluster *admin.ClusterDescription20240805 mockResponse *http.Response expectedResult Result mockError error @@ -260,11 +260,11 @@ func TestUpgradeRefreshFunc(t *testing.T) { }, { name: "Successful", - mockCluster: &admin.ClusterDescription20250101{StateName: conversion.StringPtr("stateName")}, + mockCluster: &admin.ClusterDescription20240805{StateName: conversion.StringPtr("stateName")}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ - response: &admin.ClusterDescription20250101{StateName: conversion.StringPtr("stateName")}, + response: &admin.ClusterDescription20240805{StateName: conversion.StringPtr("stateName")}, state: "stateName", error: nil, }, @@ -273,9 +273,9 @@ func TestUpgradeRefreshFunc(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testObject := mockadmin20240530.NewClustersApi(t) + testObject := mockadmin.NewClustersApi(t) - testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin20240530.GetClusterApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin.GetClusterApiRequest{ApiService: testObject}).Once() testObject.EXPECT().GetClusterExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() result, stateName, err := advancedcluster.UpgradeRefreshFunc(context.Background(), dummyClusterName, dummyProjectID, testObject)() @@ -292,7 +292,7 @@ func TestUpgradeRefreshFunc(t *testing.T) { func TestResourceListAdvancedRefreshFunc(t *testing.T) { testCases := []struct { - mockCluster 
*admin.PaginatedClusterDescription20250101 + mockCluster *admin.PaginatedClusterDescription20240805 mockResponse *http.Response expectedResult Result mockError error @@ -354,7 +354,7 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { }, { name: "Successful but with at least one cluster not idle", - mockCluster: &admin.PaginatedClusterDescription20250101{Results: &advancedClusters}, + mockCluster: &admin.PaginatedClusterDescription20240805{Results: &advancedClusters}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ @@ -365,11 +365,11 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { }, { name: "Successful", - mockCluster: &admin.PaginatedClusterDescription20250101{}, + mockCluster: &admin.PaginatedClusterDescription20240805{}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ - response: &admin.PaginatedClusterDescription20250101{}, + response: &admin.PaginatedClusterDescription20240805{}, state: "IDLE", error: nil, }, @@ -378,9 +378,9 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testObject := mockadmin20240530.NewClustersApi(t) + testObject := mockadmin.NewClustersApi(t) - testObject.EXPECT().ListClusters(mock.Anything, mock.Anything).Return(admin20240530.ListClustersApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().ListClusters(mock.Anything, mock.Anything).Return(admin.ListClustersApiRequest{ApiService: testObject}).Once() testObject.EXPECT().ListClustersExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() result, stateName, err := advancedcluster.ResourceClusterListAdvancedRefreshFunc(context.Background(), dummyProjectID, testObject)() diff --git a/internal/service/advancedcluster/model_sdk_version_conversion.go b/internal/service/advancedcluster/model_sdk_version_conversion.go index 8cafc0357e..dcc3559dec 100644 --- 
a/internal/service/advancedcluster/model_sdk_version_conversion.go +++ b/internal/service/advancedcluster/model_sdk_version_conversion.go @@ -1,8 +1,8 @@ package advancedcluster import ( - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" ) @@ -11,7 +11,7 @@ import ( // - These functions must not contain any business logic. // - All will be removed once we rely on a single API version. -func convertTagsPtrToLatest(tags *[]admin20231115.ResourceTag) *[]admin.ResourceTag { +func convertTagsPtrToLatest(tags *[]admin20240530.ResourceTag) *[]admin.ResourceTag { if tags == nil { return nil } @@ -19,15 +19,15 @@ func convertTagsPtrToLatest(tags *[]admin20231115.ResourceTag) *[]admin.Resource return &result } -func convertTagsPtrToOldSDK(tags *[]admin.ResourceTag) *[]admin20231115.ResourceTag { +func convertTagsPtrToOldSDK(tags *[]admin.ResourceTag) *[]admin20240530.ResourceTag { if tags == nil { return nil } tagsSlice := *tags - results := make([]admin20231115.ResourceTag, len(tagsSlice)) + results := make([]admin20240530.ResourceTag, len(tagsSlice)) for i := range len(tagsSlice) { tag := tagsSlice[i] - results[i] = admin20231115.ResourceTag{ + results[i] = admin20240530.ResourceTag{ Key: tag.Key, Value: tag.Value, } @@ -35,7 +35,7 @@ func convertTagsPtrToOldSDK(tags *[]admin.ResourceTag) *[]admin20231115.Resource return &results } -func convertTagsToLatest(tags []admin20231115.ResourceTag) []admin.ResourceTag { +func convertTagsToLatest(tags []admin20240530.ResourceTag) []admin.ResourceTag { results := make([]admin.ResourceTag, len(tags)) for i := range len(tags) { tag := tags[i] @@ -47,24 +47,24 @@ func convertTagsToLatest(tags []admin20231115.ResourceTag) []admin.ResourceTag { return results } -func convertBiConnectToOldSDK(biconnector 
*admin.BiConnector) *admin20231115.BiConnector { +func convertBiConnectToOldSDK(biconnector *admin.BiConnector) *admin20240530.BiConnector { if biconnector == nil { return nil } - return &admin20231115.BiConnector{ + return &admin20240530.BiConnector{ Enabled: biconnector.Enabled, ReadPreference: biconnector.ReadPreference, } } -func convertBiConnectToLatest(biconnector *admin20231115.BiConnector) *admin.BiConnector { +func convertBiConnectToLatest(biconnector *admin20240530.BiConnector) *admin.BiConnector { return &admin.BiConnector{ Enabled: biconnector.Enabled, ReadPreference: biconnector.ReadPreference, } } -func convertConnectionStringToLatest(connStrings *admin20231115.ClusterConnectionStrings) *admin.ClusterConnectionStrings { +func convertConnectionStringToLatest(connStrings *admin20240530.ClusterConnectionStrings) *admin.ClusterConnectionStrings { return &admin.ClusterConnectionStrings{ AwsPrivateLink: connStrings.AwsPrivateLink, AwsPrivateLinkSrv: connStrings.AwsPrivateLinkSrv, @@ -76,7 +76,7 @@ func convertConnectionStringToLatest(connStrings *admin20231115.ClusterConnectio } } -func convertPrivateEndpointToLatest(privateEndpoints *[]admin20231115.ClusterDescriptionConnectionStringsPrivateEndpoint) *[]admin.ClusterDescriptionConnectionStringsPrivateEndpoint { +func convertPrivateEndpointToLatest(privateEndpoints *[]admin20240530.ClusterDescriptionConnectionStringsPrivateEndpoint) *[]admin.ClusterDescriptionConnectionStringsPrivateEndpoint { if privateEndpoints == nil { return nil } @@ -95,7 +95,7 @@ func convertPrivateEndpointToLatest(privateEndpoints *[]admin20231115.ClusterDes return &results } -func convertEndpointsToLatest(privateEndpoints *[]admin20231115.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) *[]admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint { +func convertEndpointsToLatest(privateEndpoints *[]admin20240530.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) 
*[]admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint { if privateEndpoints == nil { return nil } @@ -112,7 +112,7 @@ func convertEndpointsToLatest(privateEndpoints *[]admin20231115.ClusterDescripti return &results } -func convertLabelsToLatest(labels *[]admin20231115.ComponentLabel) *[]admin.ComponentLabel { +func convertLabelsToLatest(labels *[]admin20240530.ComponentLabel) *[]admin.ComponentLabel { labelSlice := *labels results := make([]admin.ComponentLabel, len(labelSlice)) for i := range len(labelSlice) { @@ -125,14 +125,14 @@ func convertLabelsToLatest(labels *[]admin20231115.ComponentLabel) *[]admin.Comp return &results } -func convertLabelSliceToOldSDK(slice []admin.ComponentLabel, err diag.Diagnostics) ([]admin20231115.ComponentLabel, diag.Diagnostics) { +func convertLabelSliceToOldSDK(slice []admin.ComponentLabel, err diag.Diagnostics) ([]admin20240530.ComponentLabel, diag.Diagnostics) { if err != nil { return nil, err } - results := make([]admin20231115.ComponentLabel, len(slice)) + results := make([]admin20240530.ComponentLabel, len(slice)) for i := range len(slice) { label := slice[i] - results[i] = admin20231115.ComponentLabel{ + results[i] = admin20240530.ComponentLabel{ Key: label.Key, Value: label.Value, } @@ -140,15 +140,15 @@ func convertLabelSliceToOldSDK(slice []admin.ComponentLabel, err diag.Diagnostic return results, nil } -func convertRegionConfigSliceToOldSDK(slice *[]admin.CloudRegionConfig20250101) *[]admin20231115.CloudRegionConfig { +func convertRegionConfigSliceToOldSDK(slice *[]admin.CloudRegionConfig20240805) *[]admin20240530.CloudRegionConfig { if slice == nil { return nil } cloudRegionSlice := *slice - results := make([]admin20231115.CloudRegionConfig, len(cloudRegionSlice)) + results := make([]admin20240530.CloudRegionConfig, len(cloudRegionSlice)) for i := range len(cloudRegionSlice) { cloudRegion := cloudRegionSlice[i] - results[i] = admin20231115.CloudRegionConfig{ + results[i] = admin20240530.CloudRegionConfig{ 
ElectableSpecs: convertHardwareSpecToOldSDK(cloudRegion.ElectableSpecs), Priority: cloudRegion.Priority, ProviderName: cloudRegion.ProviderName, @@ -163,11 +163,11 @@ func convertRegionConfigSliceToOldSDK(slice *[]admin.CloudRegionConfig20250101) return &results } -func convertHardwareSpecToOldSDK(hwspec *admin.HardwareSpec20250101) *admin20231115.HardwareSpec { +func convertHardwareSpecToOldSDK(hwspec *admin.HardwareSpec20240805) *admin20240530.HardwareSpec { if hwspec == nil { return nil } - return &admin20231115.HardwareSpec{ + return &admin20240530.HardwareSpec{ DiskIOPS: hwspec.DiskIOPS, EbsVolumeType: hwspec.EbsVolumeType, InstanceSize: hwspec.InstanceSize, @@ -175,21 +175,21 @@ func convertHardwareSpecToOldSDK(hwspec *admin.HardwareSpec20250101) *admin20231 } } -func convertAdvancedAutoScalingSettingsToOldSDK(settings *admin.AdvancedAutoScalingSettings) *admin20231115.AdvancedAutoScalingSettings { +func convertAdvancedAutoScalingSettingsToOldSDK(settings *admin.AdvancedAutoScalingSettings) *admin20240530.AdvancedAutoScalingSettings { if settings == nil { return nil } - return &admin20231115.AdvancedAutoScalingSettings{ + return &admin20240530.AdvancedAutoScalingSettings{ Compute: convertAdvancedComputeAutoScalingToOldSDK(settings.Compute), DiskGB: convertDiskGBAutoScalingToOldSDK(settings.DiskGB), } } -func convertAdvancedComputeAutoScalingToOldSDK(settings *admin.AdvancedComputeAutoScaling) *admin20231115.AdvancedComputeAutoScaling { +func convertAdvancedComputeAutoScalingToOldSDK(settings *admin.AdvancedComputeAutoScaling) *admin20240530.AdvancedComputeAutoScaling { if settings == nil { return nil } - return &admin20231115.AdvancedComputeAutoScaling{ + return &admin20240530.AdvancedComputeAutoScaling{ Enabled: settings.Enabled, MaxInstanceSize: settings.MaxInstanceSize, MinInstanceSize: settings.MinInstanceSize, @@ -197,20 +197,20 @@ func convertAdvancedComputeAutoScalingToOldSDK(settings *admin.AdvancedComputeAu } } -func 
convertDiskGBAutoScalingToOldSDK(settings *admin.DiskGBAutoScaling) *admin20231115.DiskGBAutoScaling { +func convertDiskGBAutoScalingToOldSDK(settings *admin.DiskGBAutoScaling) *admin20240530.DiskGBAutoScaling { if settings == nil { return nil } - return &admin20231115.DiskGBAutoScaling{ + return &admin20240530.DiskGBAutoScaling{ Enabled: settings.Enabled, } } -func convertDedicatedHardwareSpecToOldSDK(spec *admin.DedicatedHardwareSpec20250101) *admin20231115.DedicatedHardwareSpec { +func convertDedicatedHardwareSpecToOldSDK(spec *admin.DedicatedHardwareSpec20240805) *admin20240530.DedicatedHardwareSpec { if spec == nil { return nil } - return &admin20231115.DedicatedHardwareSpec{ + return &admin20240530.DedicatedHardwareSpec{ NodeCount: spec.NodeCount, DiskIOPS: spec.DiskIOPS, EbsVolumeType: spec.EbsVolumeType, @@ -218,11 +218,11 @@ func convertDedicatedHardwareSpecToOldSDK(spec *admin.DedicatedHardwareSpec20250 } } -func convertDedicatedHwSpecToLatest(spec *admin20231115.DedicatedHardwareSpec, rootDiskSizeGB float64) *admin.DedicatedHardwareSpec20250101 { +func convertDedicatedHwSpecToLatest(spec *admin20240530.DedicatedHardwareSpec, rootDiskSizeGB float64) *admin.DedicatedHardwareSpec20240805 { if spec == nil { return nil } - return &admin.DedicatedHardwareSpec20250101{ + return &admin.DedicatedHardwareSpec20240805{ NodeCount: spec.NodeCount, DiskIOPS: spec.DiskIOPS, EbsVolumeType: spec.EbsVolumeType, @@ -231,7 +231,7 @@ func convertDedicatedHwSpecToLatest(spec *admin20231115.DedicatedHardwareSpec, r } } -func convertAdvancedAutoScalingSettingsToLatest(settings *admin20231115.AdvancedAutoScalingSettings) *admin.AdvancedAutoScalingSettings { +func convertAdvancedAutoScalingSettingsToLatest(settings *admin20240530.AdvancedAutoScalingSettings) *admin.AdvancedAutoScalingSettings { if settings == nil { return nil } @@ -241,7 +241,7 @@ func convertAdvancedAutoScalingSettingsToLatest(settings *admin20231115.Advanced } } -func 
convertAdvancedComputeAutoScalingToLatest(settings *admin20231115.AdvancedComputeAutoScaling) *admin.AdvancedComputeAutoScaling { +func convertAdvancedComputeAutoScalingToLatest(settings *admin20240530.AdvancedComputeAutoScaling) *admin.AdvancedComputeAutoScaling { if settings == nil { return nil } @@ -253,7 +253,7 @@ func convertAdvancedComputeAutoScalingToLatest(settings *admin20231115.AdvancedC } } -func convertDiskGBAutoScalingToLatest(settings *admin20231115.DiskGBAutoScaling) *admin.DiskGBAutoScaling { +func convertDiskGBAutoScalingToLatest(settings *admin20240530.DiskGBAutoScaling) *admin.DiskGBAutoScaling { if settings == nil { return nil } @@ -262,11 +262,11 @@ func convertDiskGBAutoScalingToLatest(settings *admin20231115.DiskGBAutoScaling) } } -func convertHardwareSpecToLatest(hwspec *admin20231115.HardwareSpec, rootDiskSizeGB float64) *admin.HardwareSpec20250101 { +func convertHardwareSpecToLatest(hwspec *admin20240530.HardwareSpec, rootDiskSizeGB float64) *admin.HardwareSpec20240805 { if hwspec == nil { return nil } - return &admin.HardwareSpec20250101{ + return &admin.HardwareSpec20240805{ DiskIOPS: hwspec.DiskIOPS, EbsVolumeType: hwspec.EbsVolumeType, InstanceSize: hwspec.InstanceSize, @@ -275,15 +275,15 @@ func convertHardwareSpecToLatest(hwspec *admin20231115.HardwareSpec, rootDiskSiz } } -func convertRegionConfigSliceToLatest(slice *[]admin20231115.CloudRegionConfig, rootDiskSizeGB float64) *[]admin.CloudRegionConfig20250101 { +func convertRegionConfigSliceToLatest(slice *[]admin20240530.CloudRegionConfig, rootDiskSizeGB float64) *[]admin.CloudRegionConfig20240805 { if slice == nil { return nil } cloudRegionSlice := *slice - results := make([]admin.CloudRegionConfig20250101, len(cloudRegionSlice)) + results := make([]admin.CloudRegionConfig20240805, len(cloudRegionSlice)) for i := range len(cloudRegionSlice) { cloudRegion := cloudRegionSlice[i] - results[i] = admin.CloudRegionConfig20250101{ + results[i] = admin.CloudRegionConfig20240805{ 
ElectableSpecs: convertHardwareSpecToLatest(cloudRegion.ElectableSpecs, rootDiskSizeGB), Priority: cloudRegion.Priority, ProviderName: cloudRegion.ProviderName, @@ -298,8 +298,8 @@ func convertRegionConfigSliceToLatest(slice *[]admin20231115.CloudRegionConfig, return &results } -func convertClusterDescToLatestExcludeRepSpecs(oldClusterDesc *admin20231115.AdvancedClusterDescription) *admin.ClusterDescription20250101 { - return &admin.ClusterDescription20250101{ +func convertClusterDescToLatestExcludeRepSpecs(oldClusterDesc *admin20240530.AdvancedClusterDescription) *admin.ClusterDescription20240805 { + return &admin.ClusterDescription20240805{ BackupEnabled: oldClusterDesc.BackupEnabled, AcceptDataRisksAndForceReplicaSetReconfig: oldClusterDesc.AcceptDataRisksAndForceReplicaSetReconfig, ClusterType: oldClusterDesc.ClusterType, diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 4c3cf1be4d..5661f597d8 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -12,8 +12,8 @@ import ( "strings" "time" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -387,7 +387,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return diag.FromErr(fmt.Errorf("accept_data_risks_and_force_replica_set_reconfig can not be set in creation, only in update")) } } - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) @@ -396,7 +396,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. rootDiskSizeGB = conversion.Pointer(v.(float64)) } - params := &admin.ClusterDescription20250101{ + params := &admin.ClusterDescription20240805{ Name: conversion.StringPtr(cast.ToString(d.Get("name"))), ClusterType: conversion.StringPtr(cast.ToString(d.Get("cluster_type"))), ReplicationSpecs: expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any), rootDiskSizeGB), @@ -422,7 +422,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if _, ok := d.GetOk("tags"); ok { - params.Tags = conversion.ExpandTagsFromSetSchemaOldSDK(d) + params.Tags = conversion.ExpandTagsFromSetSchema(d) } if v, ok := d.GetOk("mongo_db_major_version"); ok { params.MongoDBMajorVersion = conversion.StringPtr(FormatMongoDBMajorVersion(v.(string))) @@ -450,13 +450,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} } - cluster, _, err := connV220240530.ClustersApi.CreateCluster(ctx, projectID, params).Execute() + cluster, _, err := connV2.ClustersApi.CreateCluster(ctx, projectID, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) } timeout := d.Timeout(schema.TimeoutCreate) - stateConf := CreateStateChangeConfig(ctx, connV220240530, projectID, d.Get("name").(string), timeout) + stateConf := CreateStateChangeConfig(ctx, connV2, projectID, d.Get("name").(string), timeout) _, err = stateConf.WaitForStateContext(ctx) if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) @@ -465,7 +465,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if ac, ok := d.GetOk("advanced_configuration"); ok { if aclist, ok := ac.([]any); ok && len(aclist) > 0 { params := expandProcessArgs(d, aclist[0].(map[string]any)) - _, _, err := connV220231115.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, cluster.GetName(), ¶ms).Execute() + _, _, err := connV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, cluster.GetName(), ¶ms).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorConfigUpdate, cluster.GetName(), err)) } @@ -473,7 +473,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if v := d.Get("paused").(bool); v { - request := &admin.ClusterDescription20250101{ + request := &admin.ClusterDescription20240805{ Paused: conversion.Pointer(v), } if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, d.Get("name").(string), request).Execute(); err != nil { @@ -493,11 +493,11 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return resourceRead(ctx, d, meta) } -func CreateStateChangeConfig(ctx context.Context, connV220240530 *admin20240530.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { +func CreateStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { return retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"}, Target: []string{"IDLE"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), Timeout: timeout, MinTimeout: 1 * time.Minute, Delay: 3 * time.Minute, @@ -505,17 +505,17 @@ func CreateStateChangeConfig(ctx context.Context, connV220240530 *admin20240530. } func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - var clusterResp *admin.ClusterDescription20250101 + var clusterResp *admin.ClusterDescription20240805 var replicationSpecs []map[string]any if isUsingOldAPISchemaStructure(d) { - clusterOldSDK, resp, err := connV220231115.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + clusterOldSDK, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { if resp != nil && resp.StatusCode == http.StatusNotFound { d.SetId("") @@ -554,7 +554,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) } - zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220231115) + zoneNameToOldReplicationSpecIDs, 
err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) if err != nil { return diag.FromErr(err) } @@ -576,7 +576,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } - processArgs, _, err := connV220231115.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorConfigRead, clusterName, err)) } @@ -590,9 +590,9 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di // getReplicationSpecIDsFromOldAPI returns the id values of replication specs coming from old API. This is used to populate old replication_specs.*.id attribute avoiding breaking changes. // In the old API each replications spec has a 1:1 relation with each zone, so ids are returned in a map from zoneName to id. 
-func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName string, connV220231115 *admin20231115.APIClient) (map[string]string, error) { - clusterOldAPI, _, err := connV220231115.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() - if apiError, ok := admin20231115.AsError(err); ok { +func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName string, connV220240530 *admin20240530.APIClient) (map[string]string, error) { + clusterOldAPI, _, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if apiError, ok := admin20240530.AsError(err); ok { if apiError.GetErrorCode() == "ASYMMETRIC_SHARD_UNSUPPORTED" { return nil, nil // if its the case of an asymmetric shard an error is expected in old API, replication_specs.*.id attribute will not be populated } @@ -621,7 +621,7 @@ func getZoneIDsFromNewAPI(ctx context.Context, projectID, clusterName string, co return result, nil } -func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription20250101, isResourceSchema bool) diag.Diagnostics { +func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription20240805, isResourceSchema bool) diag.Diagnostics { clusterName := *cluster.Name if isResourceSchema { @@ -738,7 +738,7 @@ func resourceUpgrade(ctx context.Context, upgradeRequest *admin.LegacyAtlasTenan projectID := ids["project_id"] clusterName := ids["cluster_name"] - upgradeResponse, _, err := upgradeCluster(ctx, connV220240530, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate)) + upgradeResponse, _, err := upgradeCluster(ctx, connV2, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) @@ -754,7 +754,7 @@ func resourceUpgrade(ctx context.Context, upgradeRequest *admin.LegacyAtlasTenan } func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - 
connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -771,9 +771,9 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if diags != nil { return diags } - clusterChangeDetect := new(admin20231115.AdvancedClusterDescription) + clusterChangeDetect := new(admin20240530.AdvancedClusterDescription) if !reflect.DeepEqual(req, clusterChangeDetect) { - if _, _, err := connV220231115.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { + if _, _, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { @@ -785,7 +785,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if diags != nil { return diags } - clusterChangeDetect := new(admin.ClusterDescription20250101) + clusterChangeDetect := new(admin.ClusterDescription20240805) if !reflect.DeepEqual(req, clusterChangeDetect) { if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) @@ -800,8 +800,8 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
ac := d.Get("advanced_configuration") if aclist, ok := ac.([]any); ok && len(aclist) > 0 { params := expandProcessArgs(d, aclist[0].(map[string]any)) - if !reflect.DeepEqual(params, admin20231115.ClusterDescriptionProcessArgs{}) { - _, _, err := connV220231115.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, ¶ms).Execute() + if !reflect.DeepEqual(params, admin20240530.ClusterDescriptionProcessArgs{}) { + _, _, err := connV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, ¶ms).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorConfigUpdate, clusterName, err)) } @@ -810,7 +810,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if d.Get("paused").(bool) { - clusterRequest := &admin.ClusterDescription20250101{ + clusterRequest := &admin.ClusterDescription20240805{ Paused: conversion.Pointer(true), } if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, clusterRequest).Execute(); err != nil { @@ -824,8 +824,8 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return resourceRead(ctx, d, meta) } -func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clusterName string, connV2 *admin.APIClient) (*admin.ClusterDescription20250101, diag.Diagnostics) { - cluster := new(admin.ClusterDescription20250101) +func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clusterName string, connV2 *admin.APIClient) (*admin.ClusterDescription20240805, diag.Diagnostics) { + cluster := new(admin.ClusterDescription20240805) if d.HasChange("replication_specs") || d.HasChange("disk_size_gb") { var updatedDiskSizeGB *float64 @@ -872,7 +872,7 @@ func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clust } if d.HasChange("tags") { - cluster.Tags = conversion.ExpandTagsFromSetSchemaOldSDK(d) + cluster.Tags = conversion.ExpandTagsFromSetSchema(d) } if d.HasChange("mongo_db_major_version") { @@ -915,8 +915,8 @@ func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clust return cluster, nil } -func updateRequestOldAPI(d *schema.ResourceData, clusterName string) (*admin20231115.AdvancedClusterDescription, diag.Diagnostics) { - cluster := new(admin20231115.AdvancedClusterDescription) +func updateRequestOldAPI(d *schema.ResourceData, clusterName string) (*admin20240530.AdvancedClusterDescription, diag.Diagnostics) { + cluster := new(admin20240530.AdvancedClusterDescription) if d.HasChange("replication_specs") { cluster.ReplicationSpecs = expandAdvancedReplicationSpecsOldSDK(d.Get("replication_specs").([]any)) @@ -1046,12 +1046,12 @@ func obtainChangeForDiskSizeGBInFirstRegion(d *schema.ResourceData) *float64 { } func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - params := &admin20240530.DeleteClusterApiParams{ + 
params := &admin.DeleteClusterApiParams{ GroupId: projectID, ClusterName: clusterName, } @@ -1059,14 +1059,14 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. params.RetainBackups = conversion.Pointer(v.(bool)) } - _, err := connV220240530.ClustersApi.DeleteClusterWithParams(ctx, params).Execute() + _, err := connV2.ClustersApi.DeleteClusterWithParams(ctx, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorDelete, clusterName, err)) } log.Println("[INFO] Waiting for MongoDB ClusterAdvanced to be destroyed") - stateConf := DeleteStateChangeConfig(ctx, connV220240530, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) + stateConf := DeleteStateChangeConfig(ctx, connV2, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) // Wait, catching any errors _, err = stateConf.WaitForStateContext(ctx) if err != nil { @@ -1076,11 +1076,11 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. return nil } -func DeleteStateChangeConfig(ctx context.Context, connV220240530 *admin20240530.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { +func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { return retry.StateChangeConf{ Pending: []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"}, Target: []string{"DELETED"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, @@ -1117,10 +1117,10 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func upgradeCluster(ctx context.Context, connV220240530 *admin20240530.APIClient, request *admin20240530.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) 
(*admin20240530.LegacyAtlasCluster, *http.Response, error) { +func upgradeCluster(ctx context.Context, connV2 *admin.APIClient, request *admin.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin.LegacyAtlasCluster, *http.Response, error) { request.Name = name - cluster, resp, err := connV220240530.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute() + cluster, resp, err := connV2.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute() if err != nil { return nil, nil, err } @@ -1128,7 +1128,7 @@ func upgradeCluster(ctx context.Context, connV220240530 *admin20240530.APIClient stateConf := &retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, Target: []string{"IDLE"}, - Refresh: UpgradeRefreshFunc(ctx, name, projectID, connV220240530.ClustersApi), + Refresh: UpgradeRefreshFunc(ctx, name, projectID, connV2.ClustersApi), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, @@ -1158,9 +1158,9 @@ func splitSClusterAdvancedImportID(id string) (projectID, clusterName *string, e return } -func resourceRefreshFunc(ctx context.Context, name, projectID string, connV220240530 *admin20240530.APIClient) retry.StateRefreshFunc { +func resourceRefreshFunc(ctx context.Context, name, projectID string, connV2 *admin.APIClient) retry.StateRefreshFunc { return func() (any, string, error) { - cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, name).Execute() + cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, name).Execute() if err != nil && strings.Contains(err.Error(), "reset by peer") { return nil, "REPEATING", nil } @@ -1193,7 +1193,7 @@ func replicationSpecsHashSet(v any) int { return schema.HashString(buf.String()) } -func getUpgradeRequest(d *schema.ResourceData) *admin20240530.LegacyAtlasTenantClusterUpgradeRequest { +func getUpgradeRequest(d *schema.ResourceData) *admin.LegacyAtlasTenantClusterUpgradeRequest { if 
!d.HasChange("replication_specs") { return nil } @@ -1214,8 +1214,8 @@ func getUpgradeRequest(d *schema.ResourceData) *admin20240530.LegacyAtlasTenantC return nil } - return &admin20240530.LegacyAtlasTenantClusterUpgradeRequest{ - ProviderSettings: &admin20240530.ClusterProviderSettings{ + return &admin.LegacyAtlasTenantClusterUpgradeRequest{ + ProviderSettings: &admin.ClusterProviderSettings{ ProviderName: updatedRegion.GetProviderName(), InstanceSizeName: updatedRegion.ElectableSpecs.InstanceSize, RegionName: updatedRegion.RegionName, @@ -1227,7 +1227,7 @@ func waitForUpdateToFinish(ctx context.Context, connV2 *admin.APIClient, project stateConf := &retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, Target: []string{"IDLE"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 6b6cd245a0..495bb62ef9 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -7,8 +7,8 @@ import ( "strconv" "testing" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -239,7 +239,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - processArgs = &admin20231115.ClusterDescriptionProcessArgs{ + processArgs = 
&admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("1"), FailIndexKeyTooLong: conversion.Pointer(false), @@ -251,7 +251,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { SampleSizeBIConnector: conversion.Pointer(110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), } - processArgsUpdated = &admin20231115.ClusterDescriptionProcessArgs{ + processArgsUpdated = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("0"), FailIndexKeyTooLong: conversion.Pointer(false), @@ -287,7 +287,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - processArgs = &admin20231115.ClusterDescriptionProcessArgs{ + processArgs = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("1"), JavascriptEnabled: conversion.Pointer(true), @@ -297,7 +297,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { SampleRefreshIntervalBIConnector: conversion.Pointer(310), SampleSizeBIConnector: conversion.Pointer(110), } - processArgsUpdated = &admin20231115.ClusterDescriptionProcessArgs{ + processArgsUpdated = &admin20240530.ClusterDescriptionProcessArgs{ DefaultReadConcern: conversion.StringPtr("available"), DefaultWriteConcern: conversion.StringPtr("majority"), JavascriptEnabled: conversion.Pointer(true), @@ -332,13 +332,13 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing. 
projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - autoScaling = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScaling = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } - autoScalingUpdated = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScalingUpdated = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } ) @@ -1145,7 +1145,7 @@ func checkSingleProviderPaused(name string, paused bool) resource.TestCheckFunc "paused": strconv.FormatBool(paused)}) } -func configAdvanced(projectID, clusterName string, p *admin20231115.ClusterDescriptionProcessArgs) string { +func configAdvanced(projectID, clusterName string, p *admin20240530.ClusterDescriptionProcessArgs) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q @@ -1211,7 +1211,7 @@ func checkAdvanced(name, tls string) resource.TestCheckFunc { resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name")) } -func configAdvancedDefaultWrite(projectID, clusterName string, p *admin20231115.ClusterDescriptionProcessArgs) string { +func configAdvancedDefaultWrite(projectID, clusterName string, p 
*admin20240530.ClusterDescriptionProcessArgs) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q @@ -1277,7 +1277,7 @@ func checkAdvancedDefaultWrite(name, writeConcern, tls string) resource.TestChec resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name")) } -func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin20240530.AdvancedAutoScalingSettings) string { +func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q diff --git a/internal/service/advancedcluster/resource_update_logic.go b/internal/service/advancedcluster/resource_update_logic.go index 2d6a2684c6..146fe729ad 100644 --- a/internal/service/advancedcluster/resource_update_logic.go +++ b/internal/service/advancedcluster/resource_update_logic.go @@ -6,10 +6,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) -func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin.ReplicationSpec20250101) bool { +func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin.ReplicationSpec20240805) bool { if replicationSpecs == nil || len(*replicationSpecs) == 0 { return false } @@ -21,7 +21,7 @@ func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin.ReplicationSpec2 return true } -func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName string, connV2ClusterAPI admin.ClustersApi, replicationSpecs *[]admin.ReplicationSpec20250101) (*[]admin.ReplicationSpec20250101, diag.Diagnostics) { +func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName string, connV2ClusterAPI admin.ClustersApi, replicationSpecs *[]admin.ReplicationSpec20240805) 
(*[]admin.ReplicationSpec20240805, diag.Diagnostics) { if replicationSpecs == nil || len(*replicationSpecs) == 0 { return replicationSpecs, nil } @@ -35,7 +35,7 @@ func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName str return &result, nil } -func AddIDsToReplicationSpecs(replicationSpecs []admin.ReplicationSpec20250101, zoneToReplicationSpecsIDs map[string][]string) []admin.ReplicationSpec20250101 { +func AddIDsToReplicationSpecs(replicationSpecs []admin.ReplicationSpec20240805, zoneToReplicationSpecsIDs map[string][]string) []admin.ReplicationSpec20240805 { for zoneName, availableIDs := range zoneToReplicationSpecsIDs { var indexOfIDToUse = 0 for i := range replicationSpecs { @@ -52,7 +52,7 @@ func AddIDsToReplicationSpecs(replicationSpecs []admin.ReplicationSpec20250101, return replicationSpecs } -func groupIDsByZone(specs []admin.ReplicationSpec20250101) map[string][]string { +func groupIDsByZone(specs []admin.ReplicationSpec20240805) map[string][]string { result := make(map[string][]string) for _, spec := range specs { result[spec.GetZoneName()] = append(result[spec.GetZoneName()], spec.GetId()) @@ -64,7 +64,7 @@ func groupIDsByZone(specs []admin.ReplicationSpec20250101) map[string][]string { // - Existing replication specs can have the autoscaling values present in the state with default values even if not defined in the config (case when cluster is imported) // - API expects autoScaling and analyticsAutoScaling aligned cross all region configs in the PATCH request // This function is needed to avoid errors if a new replication spec is added, ensuring the PATCH request will have the auto scaling aligned with other replication specs when not present in config. 
-func SyncAutoScalingConfigs(replicationSpecs *[]admin.ReplicationSpec20250101) { +func SyncAutoScalingConfigs(replicationSpecs *[]admin.ReplicationSpec20240805) { if replicationSpecs == nil || len(*replicationSpecs) == 0 { return } @@ -85,7 +85,7 @@ func SyncAutoScalingConfigs(replicationSpecs *[]admin.ReplicationSpec20250101) { applyDefaultAutoScaling(replicationSpecs, defaultAutoScaling, defaultAnalyticsAutoScaling) } -func applyDefaultAutoScaling(replicationSpecs *[]admin.ReplicationSpec20250101, defaultAutoScaling, defaultAnalyticsAutoScaling *admin.AdvancedAutoScalingSettings) { +func applyDefaultAutoScaling(replicationSpecs *[]admin.ReplicationSpec20240805, defaultAutoScaling, defaultAnalyticsAutoScaling *admin.AdvancedAutoScalingSettings) { for _, spec := range *replicationSpecs { for i := range *spec.RegionConfigs { regionConfig := &(*spec.RegionConfigs)[i] diff --git a/internal/service/advancedcluster/resource_update_logic_test.go b/internal/service/advancedcluster/resource_update_logic_test.go index 0148eb3110..009e51e55d 100644 --- a/internal/service/advancedcluster/resource_update_logic_test.go +++ b/internal/service/advancedcluster/resource_update_logic_test.go @@ -5,17 +5,17 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAddIDsToReplicationSpecs(t *testing.T) { testCases := map[string]struct { - ReplicationSpecs []admin.ReplicationSpec20250101 + ReplicationSpecs []admin.ReplicationSpec20240805 ZoneToReplicationSpecsIDs map[string][]string - ExpectedReplicationSpecs []admin.ReplicationSpec20250101 + ExpectedReplicationSpecs []admin.ReplicationSpec20240805 }{ "two zones with same amount of available ids and replication specs to populate": { - ReplicationSpecs: []admin.ReplicationSpec20250101{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: 
admin.PtrString("Zone 1"), }, @@ -33,7 +33,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { "Zone 1": {"zone1-id1", "zone1-id2"}, "Zone 2": {"zone2-id1", "zone2-id2"}, }, - ExpectedReplicationSpecs: []admin.ReplicationSpec20250101{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), Id: admin.PtrString("zone1-id1"), @@ -53,7 +53,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { }, }, "less available ids than replication specs to populate": { - ReplicationSpecs: []admin.ReplicationSpec20250101{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), }, @@ -71,7 +71,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { "Zone 1": {"zone1-id1"}, "Zone 2": {"zone2-id1"}, }, - ExpectedReplicationSpecs: []admin.ReplicationSpec20250101{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), Id: admin.PtrString("zone1-id1"), @@ -91,7 +91,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { }, }, "more available ids than replication specs to populate": { - ReplicationSpecs: []admin.ReplicationSpec20250101{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), }, @@ -103,7 +103,7 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { "Zone 1": {"zone1-id1", "zone1-id2"}, "Zone 2": {"zone2-id1", "zone2-id2"}, }, - ExpectedReplicationSpecs: []admin.ReplicationSpec20250101{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { ZoneName: admin.PtrString("Zone 1"), Id: admin.PtrString("zone1-id1"), @@ -126,14 +126,14 @@ func TestAddIDsToReplicationSpecs(t *testing.T) { func TestSyncAutoScalingConfigs(t *testing.T) { testCases := map[string]struct { - ReplicationSpecs []admin.ReplicationSpec20250101 - ExpectedReplicationSpecs []admin.ReplicationSpec20250101 + ReplicationSpecs []admin.ReplicationSpec20240805 + ExpectedReplicationSpecs []admin.ReplicationSpec20240805 }{ "apply same autoscaling 
options for new replication spec which does not have autoscaling defined": { - ReplicationSpecs: []admin.ReplicationSpec20250101{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: &admin.AdvancedComputeAutoScaling{ @@ -152,7 +152,7 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: nil, AnalyticsAutoScaling: nil, @@ -160,10 +160,10 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, }, }, - ExpectedReplicationSpecs: []admin.ReplicationSpec20250101{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: &admin.AdvancedComputeAutoScaling{ @@ -182,7 +182,7 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: &admin.AdvancedComputeAutoScaling{ @@ -203,10 +203,10 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, // for this case the API will respond with an error and guide the user to align autoscaling options cross all nodes "when different autoscaling options are defined values will not be changed": { - ReplicationSpecs: []admin.ReplicationSpec20250101{ + ReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: 
&admin.AdvancedComputeAutoScaling{ @@ -225,7 +225,7 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: &admin.AdvancedComputeAutoScaling{ @@ -241,10 +241,10 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, }, }, - ExpectedReplicationSpecs: []admin.ReplicationSpec20250101{ + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ { Id: admin.PtrString("id-1"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: &admin.AdvancedComputeAutoScaling{ @@ -263,7 +263,7 @@ func TestSyncAutoScalingConfigs(t *testing.T) { }, { Id: admin.PtrString("id-2"), - RegionConfigs: &[]admin.CloudRegionConfig20250101{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{ { AutoScaling: &admin.AdvancedAutoScalingSettings{ Compute: &admin.AdvancedComputeAutoScaling{ diff --git a/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go index 9740219100..25510ef617 100644 --- a/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go @@ -7,8 +7,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( @@ -260,15 +260,15 @@ func DataSource() *schema.Resource { } func dataSourceRead(ctx context.Context, 
d *schema.ResourceData, meta any) diag.Diagnostics { - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) useZoneIDForCopySettings := false - var backupSchedule *admin.DiskBackupSnapshotSchedule20250101 - var backupScheduleOldSDK *admin20231115.DiskBackupSnapshotSchedule + var backupSchedule *admin.DiskBackupSnapshotSchedule20240805 + var backupScheduleOldSDK *admin20240530.DiskBackupSnapshotSchedule var copySettings []map[string]any var err error @@ -277,9 +277,9 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. } if !useZoneIDForCopySettings { - backupScheduleOldSDK, _, err = connV220231115.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + backupScheduleOldSDK, _, err = connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { - if apiError, ok := admin20231115.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { return diag.Errorf("%s : %s : %s", errorSnapshotBackupScheduleRead, ErrorOperationNotPermitted, AsymmetricShardsUnsupportedActionDS) } return diag.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) diff --git a/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go index ed649f2e84..bd8747afee 100644 --- a/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go @@ -1,8 +1,8 @@ package cloudbackupschedule import ( - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" 
+ admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func FlattenPolicyItem(items []admin.DiskBackupApiPolicyItem, frequencyType string) []map[string]any { @@ -21,9 +21,9 @@ func FlattenPolicyItem(items []admin.DiskBackupApiPolicyItem, frequencyType stri return policyItems } -func FlattenExport(roles *admin.DiskBackupSnapshotSchedule20250101) []map[string]any { +func FlattenExport(roles *admin.DiskBackupSnapshotSchedule20240805) []map[string]any { exportList := make([]map[string]any, 0) - emptyStruct := admin.DiskBackupSnapshotSchedule20250101{} + emptyStruct := admin.DiskBackupSnapshotSchedule20240805{} if emptyStruct.GetExport() != roles.GetExport() { exportList = append(exportList, map[string]any{ "frequency_type": roles.Export.GetFrequencyType(), @@ -33,7 +33,7 @@ func FlattenExport(roles *admin.DiskBackupSnapshotSchedule20250101) []map[string return exportList } -func flattenCopySettingsOldSDK(copySettingList []admin20231115.DiskBackupCopySetting) []map[string]any { +func flattenCopySettingsOldSDK(copySettingList []admin20240530.DiskBackupCopySetting) []map[string]any { copySettings := make([]map[string]any, 0) for _, v := range copySettingList { copySettings = append(copySettings, map[string]any{ @@ -47,7 +47,7 @@ func flattenCopySettingsOldSDK(copySettingList []admin20231115.DiskBackupCopySet return copySettings } -func FlattenCopySettings(copySettingList []admin.DiskBackupCopySetting20250101) []map[string]any { +func FlattenCopySettings(copySettingList []admin.DiskBackupCopySetting20240805) []map[string]any { copySettings := make([]map[string]any, 0) for _, v := range copySettingList { copySettings = append(copySettings, map[string]any{ diff --git a/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go index 360b7362a0..50304e7d5b 100644 --- 
a/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go @@ -6,7 +6,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupschedule" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestFlattenPolicyItem(t *testing.T) { @@ -59,12 +59,12 @@ func TestFlattenPolicyItem(t *testing.T) { func TestFlattenExport(t *testing.T) { testCases := []struct { name string - roles *admin.DiskBackupSnapshotSchedule20250101 + roles *admin.DiskBackupSnapshotSchedule20240805 expected []map[string]any }{ { name: "Non-empty Export", - roles: &admin.DiskBackupSnapshotSchedule20250101{ + roles: &admin.DiskBackupSnapshotSchedule20240805{ Export: &admin.AutoExportPolicy{ FrequencyType: conversion.StringPtr("daily"), ExportBucketId: conversion.StringPtr("bucket123"), @@ -89,12 +89,12 @@ func TestFlattenExport(t *testing.T) { func TestFlattenCopySettings(t *testing.T) { testCases := []struct { name string - settings []admin.DiskBackupCopySetting20250101 + settings []admin.DiskBackupCopySetting20240805 expected []map[string]any }{ { name: "Multiple Copy Settings", - settings: []admin.DiskBackupCopySetting20250101{ + settings: []admin.DiskBackupCopySetting20240805{ { CloudProvider: conversion.StringPtr("AWS"), Frequencies: &[]string{"daily", "weekly"}, @@ -117,7 +117,7 @@ func TestFlattenCopySettings(t *testing.T) { }, { name: "Empty Copy Settings List", - settings: []admin.DiskBackupCopySetting20250101{}, + settings: []admin.DiskBackupCopySetting20240805{}, expected: []map[string]any{}, }, } diff --git a/internal/service/cloudbackupschedule/model_sdk_version_conversion.go b/internal/service/cloudbackupschedule/model_sdk_version_conversion.go index 9d219e82b5..7f156507d0 100644 --- 
a/internal/service/cloudbackupschedule/model_sdk_version_conversion.go +++ b/internal/service/cloudbackupschedule/model_sdk_version_conversion.go @@ -1,23 +1,23 @@ package cloudbackupschedule import ( - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) // Conversions from one SDK model version to another are used to avoid duplicating our flatten/expand conversion functions. // - These functions must not contain any business logic. // - All will be removed once we rely on a single API version. -func convertPolicyItemsToOldSDK(slice *[]admin.DiskBackupApiPolicyItem) []admin20231115.DiskBackupApiPolicyItem { +func convertPolicyItemsToOldSDK(slice *[]admin.DiskBackupApiPolicyItem) []admin20240530.DiskBackupApiPolicyItem { if slice == nil { return nil } policyItemsSlice := *slice - results := make([]admin20231115.DiskBackupApiPolicyItem, len(policyItemsSlice)) + results := make([]admin20240530.DiskBackupApiPolicyItem, len(policyItemsSlice)) for i := range len(policyItemsSlice) { policyItem := policyItemsSlice[i] - results[i] = admin20231115.DiskBackupApiPolicyItem{ + results[i] = admin20240530.DiskBackupApiPolicyItem{ FrequencyInterval: policyItem.FrequencyInterval, FrequencyType: policyItem.FrequencyType, Id: policyItem.Id, @@ -28,7 +28,7 @@ func convertPolicyItemsToOldSDK(slice *[]admin.DiskBackupApiPolicyItem) []admin2 return results } -func convertPoliciesToLatest(slice *[]admin20231115.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin.AdvancedDiskBackupSnapshotSchedulePolicy { +func convertPoliciesToLatest(slice *[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin.AdvancedDiskBackupSnapshotSchedulePolicy { if slice == nil { return nil } @@ -45,7 +45,7 @@ func convertPoliciesToLatest(slice *[]admin20231115.AdvancedDiskBackupSnapshotSc return &results } -func 
convertPolicyItemsToLatest(slice *[]admin20231115.DiskBackupApiPolicyItem) *[]admin.DiskBackupApiPolicyItem { +func convertPolicyItemsToLatest(slice *[]admin20240530.DiskBackupApiPolicyItem) *[]admin.DiskBackupApiPolicyItem { if slice == nil { return nil } @@ -64,18 +64,18 @@ func convertPolicyItemsToLatest(slice *[]admin20231115.DiskBackupApiPolicyItem) return &results } -func convertAutoExportPolicyToOldSDK(exportPolicy *admin.AutoExportPolicy) *admin20231115.AutoExportPolicy { +func convertAutoExportPolicyToOldSDK(exportPolicy *admin.AutoExportPolicy) *admin20240530.AutoExportPolicy { if exportPolicy == nil { return nil } - return &admin20231115.AutoExportPolicy{ + return &admin20240530.AutoExportPolicy{ ExportBucketId: exportPolicy.ExportBucketId, FrequencyType: exportPolicy.FrequencyType, } } -func convertAutoExportPolicyToLatest(exportPolicy *admin20231115.AutoExportPolicy) *admin.AutoExportPolicy { +func convertAutoExportPolicyToLatest(exportPolicy *admin20240530.AutoExportPolicy) *admin.AutoExportPolicy { if exportPolicy == nil { return nil } @@ -86,10 +86,10 @@ func convertAutoExportPolicyToLatest(exportPolicy *admin20231115.AutoExportPolic } } -func convertBackupScheduleReqToOldSDK(req *admin.DiskBackupSnapshotSchedule20250101, - copySettingsOldSDK *[]admin20231115.DiskBackupCopySetting, - policiesOldSDK *[]admin20231115.AdvancedDiskBackupSnapshotSchedulePolicy) *admin20231115.DiskBackupSnapshotSchedule { - return &admin20231115.DiskBackupSnapshotSchedule{ +func convertBackupScheduleReqToOldSDK(req *admin.DiskBackupSnapshotSchedule20240805, + copySettingsOldSDK *[]admin20240530.DiskBackupCopySetting, + policiesOldSDK *[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy) *admin20240530.DiskBackupSnapshotSchedule { + return &admin20240530.DiskBackupSnapshotSchedule{ CopySettings: copySettingsOldSDK, Policies: policiesOldSDK, AutoExportEnabled: req.AutoExportEnabled, @@ -102,8 +102,8 @@ func convertBackupScheduleReqToOldSDK(req 
*admin.DiskBackupSnapshotSchedule20250 } } -func convertBackupScheduleToLatestExcludeCopySettings(backupSchedule *admin20231115.DiskBackupSnapshotSchedule) *admin.DiskBackupSnapshotSchedule20250101 { - return &admin.DiskBackupSnapshotSchedule20250101{ +func convertBackupScheduleToLatestExcludeCopySettings(backupSchedule *admin20240530.DiskBackupSnapshotSchedule) *admin.DiskBackupSnapshotSchedule20240805 { + return &admin.DiskBackupSnapshotSchedule20240805{ Policies: convertPoliciesToLatest(backupSchedule.Policies), AutoExportEnabled: backupSchedule.AutoExportEnabled, Export: convertAutoExportPolicyToLatest(backupSchedule.Export), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index ae56431d06..4507ec76a1 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -13,8 +13,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) const ( @@ -321,7 +321,7 @@ func Resource() *schema.Resource { func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) @@ -339,7 +339,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
diags = append(diags, diagWarning) } - if err := cloudBackupScheduleCreateOrUpdate(ctx, connV220231115, connV2, d, projectID, clusterName, true); err != nil { + if err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, connV2, d, projectID, clusterName, true); err != nil { diags = append(diags, diag.Errorf(errorSnapshotBackupScheduleCreate, err)...) return diags } @@ -353,14 +353,14 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - var backupSchedule *admin.DiskBackupSnapshotSchedule20250101 - var backupScheduleOldSDK *admin20231115.DiskBackupSnapshotSchedule + var backupSchedule *admin.DiskBackupSnapshotSchedule20240805 + var backupScheduleOldSDK *admin20240530.DiskBackupSnapshotSchedule var copySettings []map[string]any var resp *http.Response var err error @@ -371,8 +371,8 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } if useOldAPI { - backupScheduleOldSDK, resp, err = connV220231115.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() - if apiError, ok := admin20231115.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + backupScheduleOldSDK, resp, err = connV220240530.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { return diag.Errorf("%s : %s : %s", errorSnapshotBackupScheduleRead, ErrorOperationNotPermitted, AsymmetricShardsUnsupportedAction) } if err != nil { @@ -409,7 +409,7 @@ 
func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return nil } -func setSchemaFieldsExceptCopySettings(d *schema.ResourceData, backupPolicy *admin.DiskBackupSnapshotSchedule20250101) diag.Diagnostics { +func setSchemaFieldsExceptCopySettings(d *schema.ResourceData, backupPolicy *admin.DiskBackupSnapshotSchedule20240805) diag.Diagnostics { clusterName := backupPolicy.GetClusterName() if err := d.Set("cluster_id", backupPolicy.GetClusterId()); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "cluster_id", clusterName, err) @@ -470,7 +470,7 @@ func setSchemaFieldsExceptCopySettings(d *schema.ResourceData, backupPolicy *adm } func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220231115 := meta.(*config.MongoDBClient).AtlasV220231115 + connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) @@ -483,7 +483,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} } - err := cloudBackupScheduleCreateOrUpdate(ctx, connV220231115, connV2, d, projectID, clusterName, false) + err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, connV2, d, projectID, clusterName, false) if err != nil { return diag.Errorf(errorSnapshotBackupScheduleUpdate, err) } @@ -539,7 +539,7 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220231115 *admin20231115.APIClient, connV2 *admin.APIClient, d *schema.ResourceData, projectID, clusterName string, isCreate bool) error { +func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admin20240530.APIClient, connV2 *admin.APIClient, d *schema.ResourceData, projectID, clusterName string, isCreate bool) error { var err error copySettings := d.Get("copy_settings") @@ -548,7 +548,7 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220231115 *admi return err } - req := &admin.DiskBackupSnapshotSchedule20250101{} + req := &admin.DiskBackupSnapshotSchedule20240805{} var policiesItem []admin.DiskBackupApiPolicyItem if v, ok := d.GetOk("policy_item_hourly"); ok { @@ -595,14 +595,14 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220231115 *admi } if useOldAPI { - resp, _, err := connV220231115.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + resp, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { - if apiError, ok := admin20231115.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { return fmt.Errorf("%s : %s", ErrorOperationNotPermitted, AsymmetricShardsUnsupportedAction) } return fmt.Errorf("error getting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) } - var 
copySettingsOldSDK *[]admin20231115.DiskBackupCopySetting + var copySettingsOldSDK *[]admin20240530.DiskBackupCopySetting if isCopySettingsNonEmptyOrChanged(d) { copySettingsOldSDK = expandCopySettingsOldSDK(copySettings.([]any)) } @@ -610,9 +610,9 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220231115 *admi policiesOldSDK := getRequestPoliciesOldSDK(convertPolicyItemsToOldSDK(&policiesItem), resp.GetPolicies()) reqOld := convertBackupScheduleReqToOldSDK(req, copySettingsOldSDK, policiesOldSDK) - _, _, err = connV220231115.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, reqOld).Execute() + _, _, err = connV220240530.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, reqOld).Execute() if err != nil { - if apiError, ok := admin20231115.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { return fmt.Errorf("%s : %s", ErrorOperationNotPermitted, AsymmetricShardsUnsupportedAction) } return err @@ -639,13 +639,13 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220231115 *admi return nil } -func ExpandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting20250101 { +func ExpandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting20240805 { if tfMap == nil { return nil } frequencies := conversion.ExpandStringList(tfMap["frequencies"].(*schema.Set).List()) - copySetting := &admin.DiskBackupCopySetting20250101{ + copySetting := &admin.DiskBackupCopySetting20240805{ CloudProvider: conversion.Pointer(tfMap["cloud_provider"].(string)), Frequencies: &frequencies, RegionName: conversion.Pointer(tfMap["region_name"].(string)), @@ -655,8 +655,8 @@ func ExpandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting2025010 return copySetting } -func ExpandCopySettings(tfList []any) 
*[]admin.DiskBackupCopySetting20250101 { - copySettings := make([]admin.DiskBackupCopySetting20250101, 0) +func ExpandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting20240805 { + copySettings := make([]admin.DiskBackupCopySetting20240805, 0) for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) @@ -669,8 +669,8 @@ func ExpandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting20250101 { return ©Settings } -func expandCopySettingsOldSDK(tfList []any) *[]admin20231115.DiskBackupCopySetting { - copySettings := make([]admin20231115.DiskBackupCopySetting, 0) +func expandCopySettingsOldSDK(tfList []any) *[]admin20240530.DiskBackupCopySetting { + copySettings := make([]admin20240530.DiskBackupCopySetting, 0) for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) @@ -683,13 +683,13 @@ func expandCopySettingsOldSDK(tfList []any) *[]admin20231115.DiskBackupCopySetti return ©Settings } -func expandCopySettingOldSDK(tfMap map[string]any) *admin20231115.DiskBackupCopySetting { +func expandCopySettingOldSDK(tfMap map[string]any) *admin20240530.DiskBackupCopySetting { if tfMap == nil { return nil } frequencies := conversion.ExpandStringList(tfMap["frequencies"].(*schema.Set).List()) - copySetting := &admin20231115.DiskBackupCopySetting{ + copySetting := &admin20240530.DiskBackupCopySetting{ CloudProvider: conversion.Pointer(tfMap["cloud_provider"].(string)), Frequencies: &frequencies, RegionName: conversion.Pointer(tfMap["region_name"].(string)), @@ -721,8 +721,8 @@ func ExpandPolicyItems(items []any, frequencyType string) *[]admin.DiskBackupApi return &results } -func expandPolicyItem(itemObj map[string]any, frequencyType string) admin20240530.DiskBackupApiPolicyItem { - return admin20240530.DiskBackupApiPolicyItem{ +func expandPolicyItem(itemObj map[string]any, frequencyType string) admin.DiskBackupApiPolicyItem { + return admin.DiskBackupApiPolicyItem{ Id: policyItemID(itemObj), RetentionUnit: 
itemObj["retention_unit"].(string), RetentionValue: itemObj["retention_value"].(int), @@ -791,15 +791,15 @@ func CheckCopySettingsToUseOldAPI(tfList []any, isCreate bool) (bool, error) { return false, nil } -func getRequestPoliciesOldSDK(policiesItem []admin20231115.DiskBackupApiPolicyItem, respPolicies []admin20231115.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin20231115.AdvancedDiskBackupSnapshotSchedulePolicy { +func getRequestPoliciesOldSDK(policiesItem []admin20240530.DiskBackupApiPolicyItem, respPolicies []admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy { if len(policiesItem) > 0 { - policy := admin20231115.AdvancedDiskBackupSnapshotSchedulePolicy{ + policy := admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{ PolicyItems: &policiesItem, } if len(respPolicies) == 1 { policy.Id = respPolicies[0].Id } - return &[]admin20231115.AdvancedDiskBackupSnapshotSchedulePolicy{policy} + return &[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{policy} } return nil } diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 903666850e..991b084b8a 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -7,14 +7,14 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true}) 
useYearly = mig.IsProviderVersionAtLeast("1.16.0") // attribute introduced in this version - config = configNewPolicies(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + config = configNewPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -28,7 +28,6 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { { ExternalProviders: mig.ExternalProviders(), Config: config, - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -63,12 +62,12 @@ func TestMigBackupRSCloudBackupSchedule_copySettings(t *testing.T) { terraformStr = clusterInfo.TerraformStr clusterResourceName = clusterInfo.ResourceName projectID = clusterInfo.ProjectID - copySettingsConfigWithRepSpecID = configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20231115.DiskBackupSnapshotSchedule{ + copySettingsConfigWithRepSpecID = configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), }) - copySettingsConfigWithZoneID = configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20231115.DiskBackupSnapshotSchedule{ + copySettingsConfigWithZoneID = configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go 
b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index c66c8dc4c9..d65a48e26c 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupschedule" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - admin20231115 "go.mongodb.org/atlas-sdk/v20231115014/admin" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) var ( @@ -30,12 +30,11 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configNoPolicies(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configNoPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), }), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -61,12 +60,11 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ), }, { - Config: configNewPolicies(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configNewPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), }, true), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -107,12 +105,11 @@ func 
TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ), }, { - Config: configAdvancedPolicies(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configAdvancedPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), }), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -174,7 +171,6 @@ func TestAccBackupRSCloudBackupSchedule_export(t *testing.T) { Steps: []resource.TestStep{ { Config: configExportPolicies(&clusterInfo, policyName, roleName, bucketName), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -203,12 +199,11 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configDefault(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configDefault(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), }), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -239,12 +234,11 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { ), }, { - Config: configOnePolicy(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configOnePolicy(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: 
conversion.Pointer(7), }), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -332,7 +326,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_repSpecId(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -340,7 +334,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_repSpecId(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc(checksCreateAll...), }, { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, true, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, true, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -418,7 +412,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_zoneId(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -426,7 +420,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_zoneId(t *testing.T) { Check: 
resource.ComposeAggregateTestCheckFunc(checksCreateAll...), }, { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, false, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, false, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -448,12 +442,11 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configDefault(&clusterInfo, &admin20231115.DiskBackupSnapshotSchedule{ + Config: configDefault(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), }), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -497,8 +490,6 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { var ( spec = acc.ReplicationSpecRequest{ProviderName: constant.AZURE} clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{spec}}) - spec = acc.ReplicationSpecRequest{ProviderName: constant.AZURE} - clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{spec}}) ) resource.ParallelTest(t, resource.TestCase{ @@ -507,12 +498,11 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configAzure(&clusterInfo, &admin20231115.DiskBackupApiPolicyItem{ + Config: configAzure(&clusterInfo, &admin20240530.DiskBackupApiPolicyItem{ FrequencyInterval: 1, RetentionUnit: "days", RetentionValue: 1, }), - 
Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -522,12 +512,11 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1")), }, { - Config: configAzure(&clusterInfo, &admin20231115.DiskBackupApiPolicyItem{ + Config: configAzure(&clusterInfo, &admin20240530.DiskBackupApiPolicyItem{ FrequencyInterval: 2, RetentionUnit: "days", RetentionValue: 3, }), - Check: resource.ComposeAggregateTestCheckFunc( Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), @@ -666,7 +655,7 @@ func checkDestroy(s *terraform.State) error { return nil } -func configNoPolicies(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshotSchedule) string { +func configNoPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -684,10 +673,9 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshot project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) - `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configDefault(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshotSchedule) string { +func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -731,10 +719,9 @@ func configDefault(info *acc.ClusterInfo, p 
*admin20231115.DiskBackupSnapshotSch project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) - `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings, useRepSpecID bool, p *admin20231115.DiskBackupSnapshotSchedule) string { +func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings, useRepSpecID bool, p *admin20240530.DiskBackupSnapshotSchedule) string { var copySettings string var dataSourceConfig string @@ -827,7 +814,7 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp `, terraformStr, projectID, clusterResourceName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings, dataSourceConfig) } -func configOnePolicy(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshotSchedule) string { +func configOnePolicy(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -845,10 +832,9 @@ func configOnePolicy(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshotS } } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) - `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configNewPolicies(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshotSchedule, useYearly bool) string { +func configNewPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule, useYearly bool) string { var strYearly string if useYearly { strYearly = ` @@ -860,7 +846,6 @@ func configNewPolicies(info 
*acc.ClusterInfo, p *admin20231115.DiskBackupSnapsho ` } - return info.TerraformStr + fmt.Sprintf(` return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -900,10 +885,9 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapsho project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) - `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) } -func configAzure(info *acc.ClusterInfo, policy *admin20231115.DiskBackupApiPolicyItem) string { +func configAzure(info *acc.ClusterInfo, policy *admin20240530.DiskBackupApiPolicyItem) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -923,10 +907,9 @@ func configAzure(info *acc.ClusterInfo, policy *admin20231115.DiskBackupApiPolic project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) - `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) } -func configAdvancedPolicies(info *acc.ClusterInfo, p *admin20231115.DiskBackupSnapshotSchedule) string { +func configAdvancedPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -975,12 +958,10 @@ func configAdvancedPolicies(info *acc.ClusterInfo, p *admin20231115.DiskBackupSn } } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) - `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), 
p.GetRestoreWindowDays()) } func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketName string) string { return info.TerraformStr + fmt.Sprintf(` - return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q @@ -1090,7 +1071,6 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam EOF } `, info.TerraformNameRef, info.ProjectID, policyName, roleName, bucketName) - `, info.TerraformNameRef, info.ProjectID, policyName, roleName, bucketName) } func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go index afb105fdab..af79008157 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go @@ -89,7 +89,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).AtlasV2 - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Id() enabled := d.Get("enabled").(bool) @@ -115,7 +114,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
stateConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, conn.ClustersApi), Timeout: d.Timeout(timeoutKey.(string)), MinTimeout: 5 * time.Second, Delay: 3 * time.Second, diff --git a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go index 47450f69f5..4e8269d0bb 100644 --- a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go +++ b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go @@ -142,7 +142,6 @@ func Resource() *schema.Resource { func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) privateLinkID := conversion.GetEncodedID(d.Get("private_link_id").(string), "private_link_id") providerName := d.Get("provider_name").(string) @@ -193,7 +192,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
clusterConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV2.ClustersApi), Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 5 * time.Second, Delay: 5 * time.Minute, @@ -286,7 +285,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -318,7 +316,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. clusterConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV2.ClustersApi), Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 5 * time.Second, Delay: 5 * time.Minute, diff --git a/internal/service/streamconnection/data_source_stream_connections_test.go b/internal/service/streamconnection/data_source_stream_connections_test.go index ca480ae389..47af9736cc 100644 --- a/internal/service/streamconnection/data_source_stream_connections_test.go +++ b/internal/service/streamconnection/data_source_stream_connections_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) func TestAccStreamDSStreamConnections_basic(t 
*testing.T) { diff --git a/internal/testutil/acc/advanced_cluster.go b/internal/testutil/acc/advanced_cluster.go index 136a84430c..95897cef6d 100644 --- a/internal/testutil/acc/advanced_cluster.go +++ b/internal/testutil/acc/advanced_cluster.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" - "go.mongodb.org/atlas-sdk/v20240530002/admin" + "go.mongodb.org/atlas-sdk/v20240805001/admin" ) var ( diff --git a/internal/testutil/acc/atlas.go b/internal/testutil/acc/atlas.go index 2fa121b10f..f75fde0ee5 100644 --- a/internal/testutil/acc/atlas.go +++ b/internal/testutil/acc/atlas.go @@ -38,7 +38,7 @@ func createCluster(tb testing.TB, projectID, name string) string { _, _, err := ConnV2().ClustersApi.CreateCluster(context.Background(), projectID, &req).Execute() require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err) - stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV220240530(), projectID, name, 1*time.Hour) + stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour) _, err = stateConf.WaitForStateContext(context.Background()) require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err) @@ -50,7 +50,7 @@ func deleteCluster(projectID, name string) { if err != nil { fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err) } - stateConf := advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV220240530(), projectID, name, 1*time.Hour) + stateConf := advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour) _, err = stateConf.WaitForStateContext(context.Background()) if err != nil { fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err) diff --git a/internal/testutil/acc/factory.go 
b/internal/testutil/acc/factory.go index 608615d2ab..669d82b929 100644 --- a/internal/testutil/acc/factory.go +++ b/internal/testutil/acc/factory.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/provider" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" "go.mongodb.org/atlas-sdk/v20240805001/admin" ) @@ -40,10 +39,6 @@ func ConnV2() *admin.APIClient { return MongoDBClient.AtlasV2 } -func ConnV220240530() *admin20240530.APIClient { - return MongoDBClient.AtlasV220240530 -} - func ConnV2UsingProxy(proxyPort *int) *admin.APIClient { cfg := config.Config{ PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), From f647db4ed7a4b6738ba8c9a7301bdc8cb5e0ae81 Mon Sep 17 00:00:00 2001 From: Agustin Bettati Date: Sun, 11 Aug 2024 23:34:26 +0200 Subject: [PATCH 81/84] fix incorrect merging in cloud backup schedule tests --- .../resource_cloud_backup_schedule_test.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index d65a48e26c..23b2fbafc9 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -660,7 +660,6 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshot resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -670,7 +669,6 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshot data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q } `, info.TerraformNameRef, 
info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } @@ -680,7 +678,6 @@ func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSch resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -716,7 +713,6 @@ func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSch data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } @@ -772,8 +768,6 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { project_id = %[2]q cluster_name = %[3]s.name - project_id = %[2]q - cluster_name = %[3]s.name reference_hour_of_day = %[4]d reference_minute_of_hour = %[5]d @@ -819,7 +813,6 @@ func configOnePolicy(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotS resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -850,7 +843,6 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapsho resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -882,7 +874,6 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapsho data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), 
strYearly) } @@ -892,7 +883,6 @@ func configAzure(info *acc.ClusterInfo, policy *admin20240530.DiskBackupApiPolic resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q policy_item_hourly { frequency_interval = %[3]d @@ -904,7 +894,6 @@ func configAzure(info *acc.ClusterInfo, policy *admin20240530.DiskBackupApiPolic data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q } `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) } @@ -914,7 +903,6 @@ func configAdvancedPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSn resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q auto_export_enabled = false reference_hour_of_day = %[3]d @@ -965,7 +953,6 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s project_id = %[2]q - project_id = %[2]q auto_export_enabled = true reference_hour_of_day = 20 reference_minute_of_hour = "05" @@ -1005,13 +992,11 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } resource "mongodbatlas_cloud_provider_access_setup" "setup_only" { - project_id = %[2]q project_id = %[2]q provider_name = "AWS" } resource "mongodbatlas_cloud_provider_access_authorization" "auth_role" { - project_id = %[2]q project_id = %[2]q role_id = mongodbatlas_cloud_provider_access_setup.setup_only.role_id aws { @@ -1020,7 +1005,6 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { - project_id = %[2]q project_id = %[2]q iam_role_id = mongodbatlas_cloud_provider_access_authorization.auth_role.role_id bucket_name = 
aws_s3_bucket.backup.bucket From c54da61633b0ce20653b9fb22e40b2a08fdeb155 Mon Sep 17 00:00:00 2001 From: Agustin Bettati Date: Sun, 11 Aug 2024 23:41:44 +0200 Subject: [PATCH 82/84] using connV2 for import in advanced cluster --- internal/service/advancedcluster/resource_advanced_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 5661f597d8..c18965b4f5 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -1088,14 +1088,14 @@ func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, proje } func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID, name, err := splitSClusterAdvancedImportID(d.Id()) if err != nil { return nil, err } - cluster, _, err := connV220240530.ClustersApi.GetCluster(ctx, *projectID, *name).Execute() + cluster, _, err := connV2.ClustersApi.GetCluster(ctx, *projectID, *name).Execute() if err != nil { return nil, fmt.Errorf("couldn't import cluster %s in project %s, error: %s", *name, *projectID, err) } From edb8fbb9ed52a21ab38324dd0b56013aa2274c8a Mon Sep 17 00:00:00 2001 From: Agustin Bettati Date: Sun, 11 Aug 2024 23:46:32 +0200 Subject: [PATCH 83/84] use lastest sdk model for tests that require autoscaling model --- .../resource_advanced_cluster_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 495bb62ef9..2ef25cc775 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ 
b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -374,13 +374,13 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - autoScaling = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScaling = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } - autoScalingUpdated = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScalingUpdated = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } ) @@ -1308,7 +1308,7 @@ func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin.A `, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize()) } -func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p *admin20240530.AdvancedAutoScalingSettings) string { +func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q From e4a5c2dabe840c62ca299dc65800fe866cf6ac53 Mon Sep 17 00:00:00 2001 From: 
Agustin Bettati Date: Sun, 11 Aug 2024 23:50:41 +0200 Subject: [PATCH 84/84] avoid using old SDK for delete operation --- .../resource_cloud_backup_schedule.go | 10 +++++----- .../resource_cloud_backup_schedule_migration_test.go | 3 --- .../resource_cloud_backup_schedule_test.go | 12 ------------ 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index 4507ec76a1..c67cc55b20 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -330,7 +330,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. // MongoDB Atlas automatically generates a default backup policy for that cluster. // As a result, we need to first delete the default policies to avoid having // the infrastructure differs from the TF configuration file. - if _, _, err := connV220240530.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute(); err != nil { + if _, _, err := connV2.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute(); err != nil { diagWarning := diag.Diagnostic{ Severity: diag.Warning, Summary: "Error deleting default backup schedule", @@ -492,12 +492,12 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - _, _, err := connV220240530.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute() + _, _, err := connV2.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute() if err != nil { return diag.Errorf("error deleting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) } @@ -508,7 +508,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 parts := strings.SplitN(d.Id(), "-", 2) if len(parts) != 2 { @@ -518,7 +518,7 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s projectID := parts[0] clusterName := parts[1] - _, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + _, _, err := connV2.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { return nil, fmt.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) } diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 991b084b8a..8ce9343db3 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -31,9 +31,6 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( 
checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 23b2fbafc9..b8ff54e01e 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -38,7 +38,6 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -68,7 +67,6 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), 
resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -113,7 +111,6 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), @@ -174,7 +171,6 @@ func TestAccBackupRSCloudBackupSchedule_export(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "20"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "5"), @@ -207,7 +203,6 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -242,7 +237,6 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - 
resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -450,7 +444,6 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -506,7 +499,6 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "1"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1")), @@ -520,7 +512,6 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "2"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), 
resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "3"), @@ -769,9 +760,6 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp project_id = %[2]q cluster_name = %[3]s.name - reference_hour_of_day = %[4]d - reference_minute_of_hour = %[5]d - restore_window_days = %[6]d reference_hour_of_day = %[4]d reference_minute_of_hour = %[5]d restore_window_days = %[6]d