diff --git a/.changelog/2420.txt b/.changelog/2420.txt new file mode 100644 index 0000000000..8baf62b7f9 --- /dev/null +++ b/.changelog/2420.txt @@ -0,0 +1,11 @@ +```release-note:note +resource/mongodbatlas_advanced_cluster: Deprecates `replication_specs.#.id`, `replication_specs.#.num_shards`, `disk_size_gb`, `advanced_configuration.0.default_read_concern`, and `advanced_configuration.0.fail_index_key_too_long`. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide). +``` + +```release-note:note +data-source/mongodbatlas_advanced_cluster: Deprecates `replication_specs.#.id`, `replication_specs.#.num_shards`, `disk_size_gb`, `advanced_configuration.0.default_read_concern`, and `advanced_configuration.0.fail_index_key_too_long`. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide). +``` + +```release-note:note +data-source/mongodbatlas_advanced_clusters: Deprecates `replication_specs.#.id`, `replication_specs.#.num_shards`, `disk_size_gb`, `advanced_configuration.0.default_read_concern`, and `advanced_configuration.0.fail_index_key_too_long`. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide). +``` diff --git a/.changelog/2459.txt b/.changelog/2459.txt new file mode 100644 index 0000000000..3b70dc5eca --- /dev/null +++ b/.changelog/2459.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/mongodbatlas_cloud_backup_schedule: Adds `copy_settings.#.zone_id` and deprecates `copy_settings.#.replication_spec_id` for referencing zones of a cluster. This enables referencing zones of clusters using independent shard scaling, which no longer supports `replication_spec.*.id`. +``` diff --git a/.changelog/2464.txt b/.changelog/2464.txt new file mode 100644 index 0000000000..f508aa7c66 --- /dev/null +++ b/.changelog/2464.txt @@ -0,0 +1,4 @@ + +```release-note:enhancement +data-source/mongodbatlas_cloud_backup_schedule: Adds new `use_zone_id_for_copy_settings` and `copy_settings.#.zone_id` attributes and deprecates `copy_settings.#.replication_spec_id`. These new attributes enable you to reference cluster zones using independent shard scaling, which no longer supports `replication_spec.*.id`. +``` diff --git a/.changelog/2478.txt b/.changelog/2478.txt new file mode 100644 index 0000000000..7e35be9a8b --- /dev/null +++ b/.changelog/2478.txt @@ -0,0 +1,19 @@ +```release-note:enhancement +resource/mongodbatlas_advanced_cluster: Supports defining cluster shards with independent `replication_specs` objects. This feature enables defining independently scaled shards. To learn more, see the [Advanced Cluster New Sharding Schema Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/advanced-cluster-new-sharding-schema). +``` + +```release-note:note +resource/mongodbatlas_advanced_cluster: Using this new version impacts the possibility of editing the definition of multi shard clusters in the Atlas UI. This impact is limited to the first weeks of September. +``` + +```release-note:enhancement +resource/mongodbatlas_advanced_cluster: Adds `replication_specs.*.zone_id` and `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb` attributes.
To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide) and resource documentation. +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_cluster: Adds `use_replication_spec_per_shard`, `replication_specs.*.zone_id`, and `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb` attributes. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide) and data source documentation. +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_clusters: Adds `use_replication_spec_per_shard`, `replication_specs.*.zone_id`, and `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb` attributes. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide) and data source documentation. +``` diff --git a/.changelog/2499.txt b/.changelog/2499.txt new file mode 100644 index 0000000000..9c6edddb81 --- /dev/null +++ b/.changelog/2499.txt @@ -0,0 +1,47 @@ +```release-note:breaking-change +resource/mongodbatlas_third_party_integration: Removes `scheme` attribute +``` + +```release-note:breaking-change +data-source/mongodbatlas_third_party_integration: Removes `scheme` attribute +``` + +```release-note:breaking-change +data-source/mongodbatlas_third_party_integrations: Removes `scheme` attribute +``` + +```release-note:breaking-change +data-source/mongodbatlas_federated_settings_identity_providers: Removes `page_num` and `items_per_page` attributes +``` + +```release-note:breaking-change +data-source/mongodbatlas_cloud_backup_snapshot_export_bucket: Changes `id` attribute from optional to computed only +``` + +```release-note:breaking-change +data-source/mongodbatlas_cloud_backup_snapshot_export_job: Changes `id` attribute from optional to computed only +``` + +```release-note:breaking-change +resource/mongodbatlas_cloud_backup_snapshot_restore_job: Removes `created_at` attribute +``` + +```release-note:breaking-change +data-source/mongodbatlas_cloud_backup_snapshot_restore_job: Removes `created_at` attribute +``` + +```release-note:breaking-change +data-source/mongodbatlas_cloud_backup_snapshot_restore_jobs: Removes `created_at` attribute +``` + +```release-note:breaking-change +data-source/mongodbatlas_cloud_backup_snapshot_restore_job: Removes `job_id` attribute and defines `snapshot_restore_job_id` attribute as required +``` + +```release-note:breaking-change +data-source/mongodbatlas_privatelink_endpoint_service: Removes `endpoints.*.service_attachment_name` attribute +``` + +```release-note:breaking-change +resource/mongodbatlas_privatelink_endpoint_service: Removes `endpoints.*.service_attachment_name` attribute +``` diff --git a/.changelog/2505.txt b/.changelog/2505.txt index a6347324a2..f9a5d81b39 100644 --- a/.changelog/2505.txt +++ b/.changelog/2505.txt @@ -1,3 +1,7 @@ -```release-note:new-datasource -data-source/mongodbatlas_stream_processors +```release-note:new-guide +[Migration Guide: Advanced Cluster New Sharding Schema](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/advanced-cluster-new-sharding-schema). This enables Independent Shard Scaling. 
+``` + +```release-note:new-guide +[Migration Guide: Cluster to Advanced Cluster](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/cluster-to-advanced-cluster-migration-guide) ``` diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4ce09e6022..4e133f503f 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -20,7 +20,7 @@ Link to any related issue(s): - [ ] I have added tests that prove my fix is effective or that my feature works per HashiCorp requirements - [ ] I have added any necessary documentation (if appropriate) - [ ] I have run make fmt and formatted my code -- [ ] If changes include deprecations or removals, I defined an isolated PR with a relevant title as it will be used in the auto-generated changelog. +- [ ] If changes include deprecations or removals I have added appropriate changelog entries. - [ ] If changes include removal or addition of 3rd party GitHub actions, I updated our internal document. Reach out to the APIx Integration slack channel to get access to the internal document. ## Further comments diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index dc59f8c629..6724413d7e 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -295,7 +295,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -317,7 +317,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -357,7 +357,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -395,7 +395,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -419,7 +419,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -441,7 +441,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -486,7 
+486,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -508,7 +508,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -530,7 +530,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -552,7 +552,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -590,7 +590,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -617,7 +617,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -646,7 +646,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -691,7 +691,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -728,7 +728,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -753,7 +753,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ 
inputs.terraform_version }} terraform_wrapper: false @@ -775,7 +775,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -797,7 +797,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -824,7 +824,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false @@ -849,7 +849,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} terraform_wrapper: false diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index 2339309f91..17065f39f8 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -49,7 +49,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 with: - version: v1.59.1 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version + version: v1.60.3 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version - name: actionlint run: | make tools diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index daa8791a50..a0667e5f6e 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -21,7 +21,7 @@ jobs: - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - - uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ vars.TF_VERSION_LATEST }} terraform_wrapper: false diff --git a/.github/workflows/generate-changelog.yml b/.github/workflows/generate-changelog.yml index 22701df689..37c6ce991c 100644 --- a/.github/workflows/generate-changelog.yml +++ b/.github/workflows/generate-changelog.yml @@ -28,7 +28,7 @@ jobs: steps: - name: Send Slack message id: slack - uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e + uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 with: payload: | { diff --git a/.github/workflows/notify-docs-team.yml b/.github/workflows/notify-docs-team.yml index 7b6357b94f..2eac3ba993 100644 --- a/.github/workflows/notify-docs-team.yml +++ b/.github/workflows/notify-docs-team.yml @@ -29,7 +29,7 @@ jobs: permissions: pull-requests: write # Needed by sticky-pull-request-comment steps: - - uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e + - uses: 
slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_DOCS }} SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK diff --git a/.github/workflows/terraform-compatibility-matrix.yml b/.github/workflows/terraform-compatibility-matrix.yml index 41b7a3e108..7e6799f8f6 100644 --- a/.github/workflows/terraform-compatibility-matrix.yml +++ b/.github/workflows/terraform-compatibility-matrix.yml @@ -64,7 +64,7 @@ jobs: echo "slack_payload=${slack_message}" >> "${GITHUB_OUTPUT}" - name: Send Slack message id: slack - uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e + uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 with: payload: ${{ steps.slack-payload.outputs.slack_payload }} env: diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index ff25bb843d..a68a520fed 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -87,7 +87,7 @@ jobs: steps: - name: Send Slack message id: slack - uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e + uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 with: payload: | { diff --git a/.github/workflows/update-sdk.yml b/.github/workflows/update-sdk.yml index 5bf2477337..5cc0a734e1 100644 --- a/.github/workflows/update-sdk.yml +++ b/.github/workflows/update-sdk.yml @@ -23,7 +23,7 @@ jobs: uses: tj-actions/verify-changed-files@79f398ac63ab46f7f820470c821d830e5c340ef9 id: verify-changed-files - name: Create PR - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c + uses: peter-evans/create-pull-request@4320041ed380b20e97d388d56a7fb4f9b8c20e79 if: steps.verify-changed-files.outputs.files_changed == 'true' with: token: ${{ secrets.APIX_BOT_PAT }} diff --git a/.github/workflows/update_tf_compatibility_matrix.yml b/.github/workflows/update_tf_compatibility_matrix.yml index d562f41bdc..bb3084884f 100644 --- a/.github/workflows/update_tf_compatibility_matrix.yml +++ b/.github/workflows/update_tf_compatibility_matrix.yml @@ -22,7 +22,7 @@ jobs: uses: tj-actions/verify-changed-files@79f398ac63ab46f7f820470c821d830e5c340ef9 id: verify-changed-files - name: Create PR - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c + uses: peter-evans/create-pull-request@4320041ed380b20e97d388d56a7fb4f9b8c20e79 if: steps.verify-changed-files.outputs.files_changed == 'true' with: token: ${{ secrets.APIX_BOT_PAT }} diff --git a/.golangci.yml b/.golangci.yml index 7bcb5c94a9..3620dfb765 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -85,7 +85,7 @@ linters: - nakedret - nolintlint - rowserrcheck - - exportloopref + - copyloopvar - staticcheck - stylecheck - typecheck diff --git a/.tool-versions b/.tool-versions index c23da88ab6..46456547b9 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -golang 1.22.5 -terraform 1.9.2 +golang 1.23.0 +terraform 1.9.5 diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c30d1f9ca..a8a41dd7b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,46 @@ ## (Unreleased) +## 1.18.1 (August 26, 2024) + +## 1.18.0 (August 14, 2024) + +BREAKING CHANGES: + +* data-source/mongodbatlas_cloud_backup_snapshot_export_bucket: Changes `id` attribute from optional to computed only ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_cloud_backup_snapshot_export_job: Changes `id` attribute from optional to computed only 
([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_cloud_backup_snapshot_restore_job: Removes `created_at` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_cloud_backup_snapshot_restore_job: Removes `job_id` attribute and defines `snapshot_restore_job_id` attribute as required ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_cloud_backup_snapshot_restore_jobs: Removes `created_at` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_federated_settings_identity_providers: Removes `page_num` and `items_per_page` attributes ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_privatelink_endpoint_service: Removes `endpoints.*.service_attachment_name` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_third_party_integration: Removes `scheme` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* data-source/mongodbatlas_third_party_integrations: Removes `scheme` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* resource/mongodbatlas_cloud_backup_snapshot_restore_job: Removes `created_at` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* resource/mongodbatlas_privatelink_endpoint_service: Removes `endpoints.*.service_attachment_name` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) +* resource/mongodbatlas_third_party_integration: Removes `scheme` attribute ([#2499](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2499)) + +NOTES: + +* data-source/mongodbatlas_advanced_cluster: Deprecates `replication_specs.#.id`, `replication_specs.#.num_shards`, `disk_size_gb`, `advanced_configuration.0.default_read_concern`, and `advanced_configuration.0.fail_index_key_too_long`. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide). ([#2420](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2420)) +* data-source/mongodbatlas_advanced_clusters: Deprecates `replication_specs.#.id`, `replication_specs.#.num_shards`, `disk_size_gb`, `advanced_configuration.0.default_read_concern`, and `advanced_configuration.0.fail_index_key_too_long`. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide). ([#2420](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2420)) +* resource/mongodbatlas_advanced_cluster: Deprecates `replication_specs.#.id`, `replication_specs.#.num_shards`, `disk_size_gb`, `advanced_configuration.0.default_read_concern`, and `advanced_configuration.0.fail_index_key_too_long`. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide). ([#2420](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2420)) +* resource/mongodbatlas_advanced_cluster: Using this new version impacts the possibility of editing the definition of multi shard clusters in the Atlas UI. This impact is limited to the first weeks of September. 
([#2478](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2478)) + +FEATURES: + +* **New Guide:** [Migration Guide: Advanced Cluster New Sharding Schema](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/advanced-cluster-new-sharding-schema). This enables Independent Shard Scaling. ([#2505](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2505)) +* **New Guide:** [Migration Guide: Cluster to Advanced Cluster](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/cluster-to-advanced-cluster-migration-guide) ([#2505](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2505)) + ENHANCEMENTS: +* data-source/mongodbatlas_advanced_cluster: Adds `use_replication_spec_per_shard`, `replication_specs.*.zone_id`, and `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb` attributes. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide) and data source documentation. ([#2478](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2478)) +* data-source/mongodbatlas_advanced_clusters: Adds `use_replication_spec_per_shard`, `replication_specs.*.zone_id`, and `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb` attributes. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide) and data source documentation. ([#2478](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2478)) +* data-source/mongodbatlas_cloud_backup_schedule: Adds new `use_zone_id_for_copy_settings` and `copy_settings.#.zone_id` attributes and deprecates `copy_settings.#.replication_spec_id`. These new attributes enable you to reference cluster zones using independent shard scaling, which no longer supports `replication_spec.*.id` ([#2464](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2464)) * data-source/mongodbatlas_cloud_backup_snapshot_export_bucket: Adds Azure support ([#2486](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2486)) * data-source/mongodbatlas_cloud_backup_snapshot_export_buckets: Adds Azure support ([#2486](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2486)) +* resource/mongodbatlas_advanced_cluster: Adds `replication_specs.*.zone_id` and `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb` attributes. To learn more, see the [1.18.0 Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide) and resource documentation. ([#2478](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2478)) +* resource/mongodbatlas_advanced_cluster: Supports defining cluster shards with independent `replication_specs` objects. This feature enables defining independent scaled shards. To learn more, see the [Advanced Cluster New Sharding Schema Migration Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/advanced-cluster-new-sharding-schema). ([#2478](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2478)) +* resource/mongodbatlas_cloud_backup_schedule: Adds `copy_settings.#.zone_id` and deprecates `copy_settings.#.replication_spec_id` for referencing zones of a cluster. 
This enables referencing zones of clusters using independent shard scaling, which no longer supports `replication_spec.*.id`. ([#2459](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2459)) * resource/mongodbatlas_cloud_backup_snapshot_export_bucket: Adds Azure support ([#2486](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2486)) ## 1.17.6 (August 07, 2024) diff --git a/GNUmakefile b/GNUmakefile index 488873875d..84619e6bde 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -18,7 +18,7 @@ GITTAG=$(shell git describe --always --tags) VERSION=$(GITTAG:v%=%) LINKER_FLAGS=-s -w -X 'github.com/mongodb/terraform-provider-mongodbatlas/version.ProviderVersion=${VERSION}' -GOLANGCI_VERSION=v1.59.1 # Also update golangci-lint GH action in code-health.yml when updating this version +GOLANGCI_VERSION=v1.60.3 # Also update golangci-lint GH action in code-health.yml when updating this version export PATH := $(shell go env GOPATH)/bin:$(PATH) export SHELL := env PATH=$(PATH) /bin/bash @@ -74,6 +74,7 @@ lint: .PHONY: tools tools: ## Install dev tools @echo "==> Installing dependencies..." + go telemetry off # disable sending telemetry data, more info: https://go.dev/doc/telemetry go install github.com/icholy/gomajor@latest go install github.com/terraform-linters/tflint@v0.52.0 go install github.com/rhysd/actionlint/cmd/actionlint@latest diff --git a/README.md b/README.md index b665386052..ea50b98e8b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Feature requests can be submitted at https://feedback.mongodb.com/forums/924145- ## Requirements - [HashiCorp Terraform Version Compatibility Matrix](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs#hashicorp-terraform-versionhttpswwwterraformiodownloadshtml-compatibility-matrix) -- [Go Version](https://golang.org/doc/install) 1.22 (to build the provider plugin) +- [Go Version](https://golang.org/doc/install) 1.23 (to build the provider plugin) ## Using the Provider diff --git a/contributing/README.md b/contributing/README.md index 31a2ca2103..9718e582c0 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -1,6 +1,6 @@ # Contribution guidelines -Thanks for your interest in contributing to MongoDB Atlas Terraform Provider, the following documents define guidelines necessary to participate in the community. +Thanks for your interest in contributing to the MongoDB Atlas Terraform Provider. The following documents define guidelines necessary to participate in the community. Please note that we no longer develop or maintain any resources or data sources we marked as deprecated in our [documentation](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs). - [Development Setup](development-setup.md) - [Development Best Practices](development-best-practices.md) diff --git a/contributing/development-setup.md b/contributing/development-setup.md index e01ca8a79d..ece7da2611 100644 --- a/contributing/development-setup.md +++ b/contributing/development-setup.md @@ -11,7 +11,7 @@ ### Prerequisite Tools - [Git](https://git-scm.com/) -- [Go (at least Go 1.22)](https://golang.org/dl/) +- [Go (at least Go 1.23)](https://golang.org/dl/) ### Environment @@ -51,7 +51,7 @@ For more explained information about plugin override check [Development Override - Make sure that the PR title follows [*Conventional Commits*](https://www.conventionalcommits.org/). - Add comments around your new code that explain what's happening.
- Commit and push your changes to your branch then submit a pull request against the `master` branch. -- A repo maintainer will review your pull request. +- A repo maintainer will review your pull request. **Note**: If you have an active [MongoDB Atlas Support](https://www.mongodb.com/services/support/atlas-support-plans) contract, we recommend also creating a support ticket for any questions related to this process. ### Merging a Pull Request Due to security reasons, there are restrictions on how external contributions can be handled, especially concerning the use of repository secrets and running tests from forks. diff --git a/docs/data-sources/advanced_cluster.md b/docs/data-sources/advanced_cluster.md index 73c63dc8a5..9044c4c884 100644 --- a/docs/data-sources/advanced_cluster.md +++ b/docs/data-sources/advanced_cluster.md @@ -35,10 +35,54 @@ data "mongodbatlas_advanced_cluster" "example" { } ``` +## Example using latest sharding configurations with independent shard scaling in the cluster + +```terraform +resource "mongodbatlas_advanced_cluster" "example" { + project_id = "" + name = "cluster-test" + backup_enabled = false + cluster_type = "SHARDED" + + replication_specs { # Sharded cluster with 2 asymmetric shards (M30 and M40) + region_configs { + electable_specs { + instance_size = "M30" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { + region_configs { + electable_specs { + instance_size = "M40" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} + +data "mongodbatlas_advanced_cluster" "example" { + project_id = mongodbatlas_advanced_cluster.example.project_id + name = mongodbatlas_advanced_cluster.example.name + use_replication_spec_per_shard = true +} +``` + ## Argument Reference * `project_id` - (Required) The unique ID for the project to create the database user. * `name` - (Required) Name of the cluster as it appears in Atlas. Once the cluster is created, its name cannot be changed. +* `use_replication_spec_per_shard` - (Optional) Set this field to true to allow the data source to use the latest schema representing each shard with an individual `replication_specs` object. This enables representing clusters with independent shard scaling. ## Attributes Reference @@ -47,13 +91,13 @@ In addition to all arguments above, the following attributes are exported: * `id` - The cluster ID. * `bi_connector_config` - Configuration settings applied to BI Connector for Atlas on this cluster. See [below](#bi_connector_config). **NOTE** Prior version of provider had parameter as `bi_connector` * `cluster_type` - Type of the cluster that you want to create. -* `disk_size_gb` - Capacity, in gigabytes, of the host's root volume. +* `disk_size_gb` - Capacity, in gigabytes, of the host's root volume. **(DEPRECATED)** Use `replication_specs.#.region_config.#.(analytics_specs|electable_specs|read_only_specs).disk_size_gb` instead. To learn more, see the [Migration Guide](../guides/1.18.0-upgrade-guide.html.markdown). * `encryption_at_rest_provider` - Possible values are AWS, GCP, AZURE or NONE. * `tags` - Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#tags). -* `labels` - Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#labels). **DEPRECATED** Use `tags` instead. 
+* `labels` - Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#labels). **(DEPRECATED.)** Use `tags` instead. * `mongo_db_major_version` - Version of the cluster to deploy. * `pit_enabled` - Flag that indicates if the cluster uses Continuous Cloud Backup. -* `replication_specs` - Configuration for cluster regions and the hardware provisioned in them. See [below](#replication_specs). +* `replication_specs` - List of settings that configure your cluster regions. If `use_replication_spec_per_shard = true`, this array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations. See [below](#replication_specs). * `root_cert_type` - Certificate Authority that MongoDB Atlas clusters use. * `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster. * `version_release_system` - Release cadence that Atlas uses for this cluster. @@ -90,10 +134,11 @@ Key-value pairs that categorize the cluster. Each key and value has a maximum le ### replication_specs -* `num_shards` - Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED. +* `num_shards` - Provide this value if you set a `cluster_type` of `SHARDED` or `GEOSHARDED`. **(DEPRECATED.)** To learn more, see the [Migration Guide](../guides/1.18.0-upgrade-guide.html.markdown). * `region_configs` - Configuration for the hardware specifications for nodes set for a given regionEach `region_configs` object describes the region's priority in elections and the number and type of MongoDB nodes that Atlas deploys to the region. Each `region_configs` object must have either an `analytics_specs` object, `electable_specs` object, or `read_only_specs` object. See [below](#region_configs) * `container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the id of the container either created programmatically by the user before any clusters existed in a project or when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`. Example `AWS:US_EAST_1" = "61e0797dde08fb498ca11a71`. * `zone_name` - Name for the zone in a Global Cluster. +* `zone_id` - Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. If clusterType is GEOSHARDED, this value indicates the zone that the given shard belongs to and can be used to configure Global Cluster backup policies. ### region_configs @@ -110,12 +155,13 @@ Key-value pairs that categorize the cluster. Each key and value has a maximum le ### specs -* `disk_iops` - Target throughput (IOPS) desired for AWS storage attached to your cluster. +* `disk_iops` - Target IOPS (Input/Output Operations Per Second) desired for storage attached to this hardware. This parameter defaults to the cluster tier's standard IOPS value. * `ebs_volume_type` - Type of storage you want to attach to your AWS-provisioned cluster. * `STANDARD` volume types can't exceed the default IOPS rate for the selected volume size. * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. * `instance_size` - Hardware specification for the instance sizes in this region. 
* `node_count` - Number of nodes of the given type for MongoDB Atlas to deploy to the region. +* `disk_size_gb` - Storage capacity that the host's root volume possesses expressed in gigabytes. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. ### auto_scaling @@ -134,9 +180,9 @@ Key-value pairs that categorize the cluster. Each key and value has a maximum le * `compute_max_instance_size` - Maximum instance size to which your cluster can automatically scale (such as M40). #### Advanced Configuration -* `default_read_concern` - [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/). +* `default_read_concern` - [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/). **(DEPRECATED.)** MongoDB 5.0 and later clusters default to `local`. To use a custom read concern level, please refer to your driver documentation. * `default_write_concern` - [Default level of acknowledgment requested from MongoDB for write operations](https://docs.mongodb.com/manual/reference/write-concern/) set for this cluster. MongoDB 4.4 clusters default to [1](https://docs.mongodb.com/manual/reference/write-concern/). -* `fail_index_key_too_long` - When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. +* `fail_index_key_too_long` - When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. **(DEPRECATED.)** This parameter has been removed as of [MongoDB 4.4](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.failIndexKeyTooLong). * `javascript_enabled` - When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations. 
* `minimum_enabled_tls_protocol` - Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections. Valid values are: diff --git a/docs/data-sources/advanced_clusters.md b/docs/data-sources/advanced_clusters.md index f6bb290e63..ee67dd01fd 100644 --- a/docs/data-sources/advanced_clusters.md +++ b/docs/data-sources/advanced_clusters.md @@ -34,9 +34,53 @@ data "mongodbatlas_advanced_clusters" "example" { } ``` +## Example using latest sharding configurations with independent shard scaling in the cluster + +```terraform +resource "mongodbatlas_advanced_cluster" "example" { + project_id = "" + name = "cluster-test" + backup_enabled = false + cluster_type = "SHARDED" + + replication_specs { # Sharded cluster with 2 asymmetric shards (M30 and M40) + region_configs { + electable_specs { + instance_size = "M30" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { + region_configs { + electable_specs { + instance_size = "M40" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} + +data "mongodbatlas_advanced_cluster" "example-asym" { + project_id = mongodbatlas_advanced_cluster.example.project_id + name = mongodbatlas_advanced_cluster.example.name + use_replication_spec_per_shard = true +} +``` + ## Argument Reference * `project_id` - (Required) The unique ID for the project to get the clusters. +* `use_replication_spec_per_shard` - (Optional) Set this field to true to allow the data source to use the latest schema representing each shard with an individual `replication_specs` object. This enables representing clusters with independent shard scaling. **Note:** If not set to true, this data source returns all clusters except clusters with asymmetric shards. ## Attributes Reference @@ -49,13 +93,13 @@ In addition to all arguments above, the following attributes are exported: * `bi_connector_config` - Configuration settings applied to BI Connector for Atlas on this cluster. See [below](#bi_connector_config). **NOTE** Prior version of provider had parameter as `bi_connector` * `cluster_type` - Type of the cluster that you want to create. -* `disk_size_gb` - Capacity, in gigabytes, of the host's root volume. +* `disk_size_gb` - Capacity, in gigabytes, of the host's root volume. **(DEPRECATED.)** Use `replication_specs.#.region_config.#.(analytics_specs|electable_specs|read_only_specs).disk_size_gb` instead. To learn more, see the [Migration Guide](../guides/1.18.0-upgrade-guide.html.markdown). * `encryption_at_rest_provider` - Possible values are AWS, GCP, AZURE or NONE. * `tags` - Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#tags). * `labels` - Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#labels). * `mongo_db_major_version` - Version of the cluster to deploy. * `pit_enabled` - Flag that indicates if the cluster uses Continuous Cloud Backup. -* `replication_specs` - Configuration for cluster regions and the hardware provisioned in them. See [below](#replication_specs) +* `replication_specs` - List of settings that configure your cluster regions. If `use_replication_spec_per_shard = true`, this array has one object per shard representing node configurations in each shard.
For replica sets there is only one object representing node configurations. See [below](#replication_specs) * `root_cert_type` - Certificate Authority that MongoDB Atlas clusters use. * `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster. * `version_release_system` - Release cadence that Atlas uses for this cluster. @@ -92,10 +136,11 @@ Key-value pairs that categorize the cluster. Each key and value has a maximum le ### replication_specs -* `num_shards` - Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED. +* `num_shards` - Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED. **(DEPRECATED.)** To learn more, see the [Migration Guide](../guides/1.18.0-upgrade-guide.html.markdown) for more details. * `region_configs` - Configuration for the hardware specifications for nodes set for a given regionEach `region_configs` object describes the region's priority in elections and the number and type of MongoDB nodes that Atlas deploys to the region. Each `region_configs` object must have either an `analytics_specs` object, `electable_specs` object, or `read_only_specs` object. See [below](#region_configs) * `container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the id of the container either created programmatically by the user before any clusters existed in a project or when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`. Example `AWS:US_EAST_1" = "61e0797dde08fb498ca11a71`. * `zone_name` - Name for the zone in a Global Cluster. +* `zone_id` - Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. If clusterType is GEOSHARDED, this value indicates the zone that the given shard belongs to and can be used to configure Global Cluster backup policies. ### region_configs @@ -112,12 +157,13 @@ Key-value pairs that categorize the cluster. Each key and value has a maximum le ### specs -* `disk_iops` - Target throughput (IOPS) desired for AWS storage attached to your cluster. +* `disk_iops` - Target IOPS (Input/Output Operations Per Second) desired for storage attached to this hardware. This parameter defaults to the cluster tier's standard IOPS value. * `ebs_volume_type` - Type of storage you want to attach to your AWS-provisioned cluster. * `STANDARD` volume types can't exceed the default IOPS rate for the selected volume size. * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. * `instance_size` - Hardware specification for the instance sizes in this region. * `node_count` - Number of nodes of the given type for MongoDB Atlas to deploy to the region. +* `disk_size_gb` - Storage capacity that the host's root volume possesses expressed in gigabytes. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. ### auto_scaling @@ -137,9 +183,9 @@ Key-value pairs that categorize the cluster. 
Each key and value has a maximum le #### Advanced Configuration -* `default_read_concern` - [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/). +* `default_read_concern` - [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/). **(DEPRECATED.)** MongoDB 5.0 and later clusters default to `local`. To use a custom read concern level, please refer to your driver documentation. * `default_write_concern` - [Default level of acknowledgment requested from MongoDB for write operations](https://docs.mongodb.com/manual/reference/write-concern/) set for this cluster. MongoDB 4.4 clusters default to [1](https://docs.mongodb.com/manual/reference/write-concern/). -* `fail_index_key_too_long` - When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. +* `fail_index_key_too_long` - When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. **(DEPRECATED.)** This parameter has been removed as of [MongoDB 4.4](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.failIndexKeyTooLong). * `javascript_enabled` - When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations. * `minimum_enabled_tls_protocol` - Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections.Valid values are: diff --git a/docs/data-sources/cloud_backup_schedule.md b/docs/data-sources/cloud_backup_schedule.md index 1aa383656a..31c9ffb255 100644 --- a/docs/data-sources/cloud_backup_schedule.md +++ b/docs/data-sources/cloud_backup_schedule.md @@ -33,11 +33,31 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { reference_hour_of_day = 3 reference_minute_of_hour = 45 restore_window_days = 4 + + policy_item_daily { + frequency_interval = 1 + retention_unit = "days" + retention_value = 14 + } + + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY", + "YEARLY", + "ON_DEMAND"] + region_name = "US_EAST_1" + zone_id = mongodbatlas_advanced_cluster.my_cluster.replication_specs.*.zone_id[0] + should_copy_oplogs = false + } } data "mongodbatlas_cloud_backup_schedule" "test" { project_id = mongodbatlas_cloud_backup_schedule.test.project_id cluster_name = mongodbatlas_cloud_backup_schedule.test.cluster_name + use_zone_id_for_copy_settings = true } ``` @@ -45,6 +65,8 @@ data "mongodbatlas_cloud_backup_schedule" "test" { * `project_id` - (Required) The unique identifier of the project for the Atlas cluster. * `cluster_name` - (Required) The name of the Atlas cluster that contains the snapshots backup policy you want to retrieve. 
+* `use_zone_id_for_copy_settings` - Set this field to `true` to allow the data source to use the latest schema that populates `copy_settings.#.zone_id` instead of the deprecated `copy_settings.#.replication_spec_id`. These fields also enable you to reference cluster zones using independent shard scaling, which no longer supports `replication_spec.*.id`. To learn more, see the [1.18.0 upgrade guide](../guides/1.18.0-upgrade-guide.md#transition-cloud-backup-schedules-for-clusters-to-use-zones). + ## Attributes Reference @@ -56,41 +78,44 @@ In addition to all arguments above, the following attributes are exported: * `reference_minute_of_hour` - UTC Minute of day between 0 and 59 representing which minute of the `reference_hour_of_day` that Atlas takes the snapshot. * `restore_window_days` - Specifies a restore window in days for cloud backup to maintain. * `id_policy` - Unique identifier of the backup policy. -* `policy_item_hourly` - Hourly policy item -* `policy_item_daily` - Daily policy item -* `policy_item_weekly` - Weekly policy item -* `policy_item_monthly` - Monthly policy item -* `policy_item_yearly` - Yearly policy item +* `policy_item_hourly` - (Optional) Hourly policy item. See [below](#policy_item_hourly) +* `policy_item_daily` - (Optional) Daily policy item. See [below](#policy_item_daily) +* `policy_item_weekly` - (Optional) Weekly policy item. See [below](#policy_item_weekly) +* `policy_item_monthly` - (Optional) Monthly policy item. See [below](#policy_item_monthly) +* `policy_item_yearly` - (Optional) Yearly policy item. See [below](#policy_item_yearly) * `auto_export_enabled` - Flag that indicates whether automatic export of cloud backup snapshots to the AWS bucket is enabled. Value can be one of the following: * true - enables automatic export of cloud backup snapshots to the AWS bucket * false - disables automatic export of cloud backup snapshots to the AWS bucket (default) * `use_org_and_group_names_in_export_prefix` - Specify true to use organization and project names instead of organization and project UUIDs in the path for the metadata files that Atlas uploads to your S3 bucket after it finishes exporting the snapshots. To learn more about the metadata files that Atlas uploads, see [Export Cloud Backup Snapshot](https://www.mongodb.com/docs/atlas/backup/cloud-backup/export/#std-label-cloud-provider-snapshot-export). -### Export +* `copy_settings` - List that contains a document for each copy setting item in the desired backup policy. See [below](#copy_settings) +* `export` - Policy for automatically exporting Cloud Backup Snapshots. See [below](#export) + +### export * `export_bucket_id` - Unique identifier of the mongodbatlas_cloud_backup_snapshot_export_bucket export_bucket_id value. * `frequency_type` - Frequency associated with the export snapshot item. -### Policy Item Hourly +### policy_item_hourly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For hourly policies, the frequency type is defined as `hourly`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (hourly in this case). The supported values for hourly policies are `1`, `2`, `4`, `6`, `8` or `12` hours. Note that `12` hours is the only accepted value for NVMe clusters. 
* `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Weekly policy must have retention of at least 7 days or 1 week. Note that for less frequent policy items, Atlas requires that you specify a retention period greater than or equal to the retention period specified for more frequent policy items. For example: If the daily policy item specifies a retention of two weeks, the weekly retention policy must specify two weeks or greater. -### Policy Item Monthly +### policy_item_monthly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For monthly policies, the frequency type is defined as `monthly`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (monthly in this case). The supported values for monthly policies are @@ -99,7 +124,7 @@ In addition to all arguments above, the following attributes are exported: * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Monthly policy must have a retention of at least 31 days, 5 weeks, or 1 month. Note that for less frequent policy items, Atlas requires that you specify a retention period greater than or equal to the retention period specified for more frequent policy items. For example: If the weekly policy item specifies a retention of two weeks, the monthly retention policy must specify two weeks or greater.
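To make the retention hierarchy described above concrete, here is a minimal sketch of resource-side policy items. The block and attribute names follow the `mongodbatlas_cloud_backup_schedule` resource example earlier in this document; the cluster reference and all interval/retention values are illustrative assumptions, chosen so that each less frequent item retains snapshots at least as long as the more frequent one.

```terraform
resource "mongodbatlas_cloud_backup_schedule" "retention_example" {
  # Assumes a mongodbatlas_advanced_cluster.my_cluster resource exists, as in the copy_settings example above.
  project_id   = mongodbatlas_advanced_cluster.my_cluster.project_id
  cluster_name = mongodbatlas_advanced_cluster.my_cluster.name

  policy_item_daily {
    frequency_interval = 1  # one snapshot per day
    retention_unit     = "days"
    retention_value    = 14 # kept for two weeks
  }

  policy_item_weekly {
    frequency_interval = 6 # 6 = Saturday
    retention_unit     = "weeks"
    retention_value    = 4 # at least the daily retention of two weeks
  }

  policy_item_monthly {
    frequency_interval = 28 # day of the month
    retention_unit     = "months"
    retention_value    = 12 # at least the weekly retention
  }
}
```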
-### Policy Item Yearly +### policy_item_yearly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For yearly policies, the frequency type is defined as `yearly`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (yearly in this case). The supported values for yearly policies are @@ -107,13 +132,14 @@ In addition to all arguments above, the following attributes are exported: * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Yearly policy must have retention of at least 1 year. -### Snapshot Distribution +### copy_settings * `cloud_provider` - Human-readable label that identifies the cloud provider that stores the snapshot copy. i.e. "AWS" "AZURE" "GCP" * `frequencies` - List that describes which types of snapshots to copy. i.e. "HOURLY" "DAILY" "WEEKLY" "MONTHLY" "YEARLY" "ON_DEMAND" * `region_name` - Target region to copy snapshots belonging to replicationSpecId to. Please supply the 'Atlas Region' which can be found under https://www.mongodb.com/docs/atlas/reference/cloud-providers/ 'regions' link -* `replication_spec_id` - Unique 24-hexadecimal digit string that identifies the replication object for a zone in a cluster. For global clusters, there can be multiple zones to choose from. For sharded clusters and replica set clusters, there is only one zone in the cluster. To find the Replication Spec Id, consult the replicationSpecs array returned from [Return One Multi-Cloud Cluster in One Project](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Clusters/operation/getCluster). +* `zone_id` - Unique 24-hexadecimal digit string that identifies the zone in a cluster. For global clusters, there can be multiple zones to choose from. For sharded clusters and replica set clusters, there is only one zone in the cluster. +* `replication_spec_id` - Unique 24-hexadecimal digit string that identifies the replication object for a zone in a cluster. For global clusters, there can be multiple zones to choose from. For sharded clusters and replica set clusters, there is only one zone in the cluster. To find the Replication Spec Id, consult the replicationSpecs array returned from [Return One Multi-Cloud Cluster in One Project](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Clusters/operation/getCluster). **(DEPRECATED)** Use `zone_id` instead. To learn more, see the [1.18.0 upgrade guide](../guides/1.18.0-upgrade-guide.md#transition-cloud-backup-schedules-for-clusters-to-use-zones). * `should_copy_oplogs` - Flag that indicates whether to copy the oplogs to the target region. You can use the oplogs to perform point-in-time restores. **Note** The parameter deleteCopiedBackups is not supported in terraform please leverage Atlas Admin API or AtlasCLI instead to manage the lifecycle of backup snaphot copies. 
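For example, the following is a minimal sketch of reading the schedule with the new zone-based schema; `var.project_id` and `var.cluster_name` are placeholders for your own values, and the output assumes the policy defines at least one copy setting:

```terraform
# Read the backup schedule using the new schema that populates copy_settings.#.zone_id.
data "mongodbatlas_cloud_backup_schedule" "this" {
  project_id                    = var.project_id
  cluster_name                  = var.cluster_name
  use_zone_id_for_copy_settings = true
}

# Reference the zone of the first copy setting item.
output "first_copy_setting_zone_id" {
  value = data.mongodbatlas_cloud_backup_schedule.this.copy_settings[0].zone_id
}
```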
-For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/reference/api/cloud-backup/schedule/get-all-schedules/) +For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/reference/api/cloud-backup/schedule/get-all-schedules/) \ No newline at end of file diff --git a/docs/data-sources/cloud_backup_snapshot_restore_job.md b/docs/data-sources/cloud_backup_snapshot_restore_job.md index 909d83aee8..c080d2aeee 100644 --- a/docs/data-sources/cloud_backup_snapshot_restore_job.md +++ b/docs/data-sources/cloud_backup_snapshot_restore_job.md @@ -27,9 +27,9 @@ resource "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { } data "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { - project_id = mongodbatlas_cloud_backup_snapshot_restore_job.test.project_id + project_id = mongodbatlas_cloud_backup_snapshot_restore_job.test.project_id cluster_name = mongodbatlas_cloud_backup_snapshot_restore_job.test.cluster_name - job_id = mongodbatlas_cloud_backup_snapshot_restore_job.test.id + snapshot_restore_job_id = mongodbatlas_cloud_backup_snapshot_restore_job.test.snapshot_restore_job_id } ``` @@ -37,8 +37,7 @@ data "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { * `project_id` - (Required) The unique identifier of the project for the Atlas cluster. * `cluster_name` - (Required) The name of the Atlas cluster for which you want to retrieve the restore job. -* `job_id` - (Optional) A base64-encoded ID of `project_id`, `cluster_name`, and `job_id` of this resource. **Note**: This attribute is deprecated, use `snapshot_restore_job_id` instead. -* `snapshot_restore_job_id` - (Optional) The unique identifier of the restore job to retrieve. Required for versions 1.18.0 and later. +* `snapshot_restore_job_id` - (Required) The unique identifier of the restore job to retrieve. ## Attributes Reference @@ -60,4 +59,4 @@ In addition to all arguments above, the following attributes are exported: * `oplogInc` - Oplog operation number from which to you want to restore this snapshot. * `pointInTimeUTCSeconds` - Timestamp in the number of seconds that have elapsed since the UNIX epoch. -For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/reference/api/cloud-backup/restore/get-one-restore-job/) \ No newline at end of file +For more information see: [MongoDB Atlas API Reference.](https://docs.atlas.mongodb.com/reference/api/cloud-backup/restore/get-one-restore-job/) diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index 4aa219be3d..2d70b437ca 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -2,13 +2,13 @@ `mongodbatlas_cluster` describes a Cluster. The data source requires your Project ID. --> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. - ~> **IMPORTANT:**
• Multi Region Cluster: The `mongodbatlas_cluster` data source doesn't return the `container_id` for each region utilized by the cluster. For retrieving the `container_id`, we recommend the [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/data-sources/advanced_cluster) data source instead.
• Changes to cluster configurations can affect costs. Before making changes, please see [Billing](https://docs.atlas.mongodb.com/billing/).
• If your Atlas project contains a custom role that uses actions introduced in a specific MongoDB version, you cannot create a cluster with a MongoDB version less than that version unless you delete the custom role. +-> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. + ## Example Usage ```terraform @@ -124,8 +124,6 @@ In addition to all arguments above, the following attributes are exported: * `container_id` - The Network Peering Container ID. --> **NOTE:** If you need to get an existing container ID see the [How-To Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). - * `version_release_system` - Release cadence that Atlas uses for this cluster. * `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details. @@ -234,4 +232,4 @@ Contains a key-value pair that tags that the cluster was created by a Terraform * `sample_refresh_interval_bi_connector` - Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. The specified value must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled. * `transaction_lifetime_limit_seconds` - Lifetime, in seconds, of multi-document transactions. Defaults to 60 seconds. -See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) \ No newline at end of file +See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index b37cff038b..5dd99be4d9 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -2,13 +2,13 @@ `mongodbatlas_cluster` describes all Clusters by the provided project_id. The data source requires your Project ID. --> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. - ~> **IMPORTANT:**
• Multi Region Cluster: The `mongodbatlas_cluster` data source doesn't return the `container_id` for each region utilized by the cluster. For retrieving the `container_id`, we recommend the [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/data-sources/advanced_clusters) data source instead.
• Changes to cluster configurations can affect costs. Before making changes, please see [Billing](https://docs.atlas.mongodb.com/billing/).
• If your Atlas project contains a custom role that uses actions introduced in a specific MongoDB version, you cannot create a cluster with a MongoDB version less than that version unless you delete the custom role. +-> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. + ## Example Usage ```terraform @@ -115,8 +115,6 @@ In addition to all arguments above, the following attributes are exported: * `container_id` - The Network Peering Container ID. --> **NOTE:** If you need to get an existing container ID see the [How-To Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). - * `version_release_system` - Release cadence that Atlas uses for this cluster. * `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details. @@ -222,4 +220,4 @@ Contains a key-value pair that tags that the cluster was created by a Terraform * `sample_refresh_interval_bi_connector` - Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. The specified value must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled. -See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) \ No newline at end of file +See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) diff --git a/docs/data-sources/federated_settings_identity_providers.md b/docs/data-sources/federated_settings_identity_providers.md index 0e8fad7380..1aa52dc8f2 100644 --- a/docs/data-sources/federated_settings_identity_providers.md +++ b/docs/data-sources/federated_settings_identity_providers.md @@ -2,6 +2,8 @@ `mongodbatlas_federated_settings_identity_providers` provides an Federated Settings Identity Providers datasource. Atlas Cloud Federated Settings Identity Providers provides federated settings outputs for the configured Identity Providers. +Note: This implementation returns a maximum of 100 results. + ## Example Usage ```terraform @@ -24,8 +26,6 @@ data "mongodbatlas_federated_settings_identity_providers" "identitty_provider" { ## Argument Reference * `federation_settings_id` - (Required) Unique 24-hexadecimal digit string that identifies the federated authentication configuration. -* `page_num` - (Optional) The page to return. Defaults to `1`. **Note**: This attribute is deprecated and not being used. -* `items_per_page` - (Optional) Number of items to return per page, up to a maximum of 500. Defaults to `100`. **Note**: This attribute is deprecated and not being used. The implementation is currently limited to returning a maximum of 100 results. * `protocols` - (Optional) The protocols of the target identity providers. Valid values are `SAML` and `OIDC`. * `idp_types` - (Optional) The types of the target identity providers. Valid values are `WORKFORCE` and `WORKLOAD`. 
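For example, a minimal sketch that filters the results by protocol and IdP type; `var.federation_settings_id` is a placeholder for your own value:

```terraform
# List only WORKFORCE identity providers configured with OIDC.
data "mongodbatlas_federated_settings_identity_providers" "workforce_oidc" {
  federation_settings_id = var.federation_settings_id
  protocols              = ["OIDC"]
  idp_types              = ["WORKFORCE"]
}
```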
diff --git a/docs/data-sources/global_cluster_config.md b/docs/data-sources/global_cluster_config.md index dd74ba44da..42fd408e27 100644 --- a/docs/data-sources/global_cluster_config.md +++ b/docs/data-sources/global_cluster_config.md @@ -9,59 +9,89 @@ ## Example Usage ```terraform - resource "mongodbatlas_cluster" "test" { - project_id = "" - name = "" - cloud_backup = true - cluster_type = "GEOSHARDED" - - //Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M30" - - replication_specs { - zone_name = "Zone 1" - num_shards = 2 - regions_config { - region_name = "EU_CENTRAL_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - - replication_specs { - zone_name = "Zone 2" - num_shards = 2 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } +resource "mongodbatlas_advanced_cluster" "test" { + project_id = "" + name = "" + cluster_type = "GEOSHARDED" + backup_enabled = true + + replication_specs { # Zone 1, shard 1 + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_CENTRAL_1" + } + } + + replication_specs { # Zone 1, shard 2 + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_CENTRAL_1" + } + } + + replication_specs { # Zone 2, shard 1 + zone_name = "Zone 2" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_2" + } + } + + replication_specs { # Zone 2, shard 2 + zone_name = "Zone 2" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_2" + } + } +} + +resource "mongodbatlas_global_cluster_config" "config" { + project_id = mongodbatlas_advanced_cluster.test.project_id + cluster_name = mongodbatlas_advanced_cluster.test.name + + managed_namespaces { + db = "mydata" + collection = "publishers" + custom_shard_key = "city" } - resource "mongodbatlas_global_cluster_config" "config" { - project_id = mongodbatlas_cluster.test.project_id - cluster_name = mongodbatlas_cluster.test.name - - managed_namespaces { - db = "mydata" - collection = "publishers" - custom_shard_key = "city" - } - - custom_zone_mappings { - location ="CA" - zone = "Zone 1" - } + custom_zone_mappings { + location ="CA" + zone = "Zone 1" } +} - data "mongodbatlas_global_cluster_config" "config" { - project_id = mongodbatlas_global_cluster_config.config.project_id - cluster_name = mongodbatlas_global_cluster_config.config.cluster_name - } + data "mongodbatlas_global_cluster_config" "config" { + project_id = mongodbatlas_global_cluster_config.config.project_id + cluster_name = mongodbatlas_global_cluster_config.config.cluster_name +} ``` ## Argument Reference diff --git a/docs/data-sources/network_container.md b/docs/data-sources/network_container.md index eabff26980..2eccf6de1f 100644 --- a/docs/data-sources/network_container.md +++ b/docs/data-sources/network_container.md @@ -51,5 +51,3 @@ In addition to all arguments above, the following attributes are exported: See detailed information for arguments and attributes: [MongoDB API Network Peering Container](https://docs.atlas.mongodb.com/reference/api/vpc-create-container/) - --> **NOTE:** If you need to get an existing container ID see the [How-To 
Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). \ No newline at end of file diff --git a/docs/data-sources/network_containers.md b/docs/data-sources/network_containers.md index 8418e6bd2d..8006a7c125 100644 --- a/docs/data-sources/network_containers.md +++ b/docs/data-sources/network_containers.md @@ -54,5 +54,3 @@ In addition to all arguments above, the following attributes are exported: See detailed information for arguments and attributes: [MongoDB API Network Peering Container](https://docs.atlas.mongodb.com/reference/api/vpc-get-containers-list/) - --> **NOTE:** If you need to get an existing container ID see the [How-To Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). \ No newline at end of file diff --git a/docs/data-sources/network_peering.md b/docs/data-sources/network_peering.md index 40ab373db6..ba638edf3a 100644 --- a/docs/data-sources/network_peering.md +++ b/docs/data-sources/network_peering.md @@ -4,9 +4,6 @@ -> **NOTE:** Groups and projects are synonymous terms. You may find **group_id** in the official documentation. --> **NOTE:** If you need to get an existing container ID see the [How-To Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). - - ## Example Usage ### Basic Example (AWS). @@ -57,4 +54,4 @@ In addition to all arguments above, the following attributes are exported: * `network_name` - Name of the network peer to which Atlas connects. * `error_message` - When `"status" : "FAILED"`, Atlas provides a description of the error. -See detailed information for arguments and attributes: [MongoDB API Network Peering Connection](https://docs.atlas.mongodb.com/reference/api/vpc-get-connection/) \ No newline at end of file +To learn more about arguments and attributes, see the [MongoDB API Network Peering Connection](https://docs.atlas.mongodb.com/reference/api/vpc-get-connection/) documentation. diff --git a/docs/data-sources/privatelink_endpoint_service.md b/docs/data-sources/privatelink_endpoint_service.md index 13a40f7525..6c531716fa 100644 --- a/docs/data-sources/privatelink_endpoint_service.md +++ b/docs/data-sources/privatelink_endpoint_service.md @@ -118,6 +118,5 @@ In addition to all arguments above, the following attributes are exported: * `endpoint_name` - Forwarding rule that corresponds to the endpoint you created in GCP. * `ip_address` - Private IP address of the network endpoint group you created in GCP. * `status` - Status of the endpoint. Atlas returns one of the [values shown above](https://docs.atlas.mongodb.com/reference/api/private-endpoints-endpoint-create-one/#std-label-ref-status-field). - * `service_attachment_name` - Unique alphanumeric and special character strings that identify the service attachment associated with the endpoint. See [MongoDB Atlas API](https://docs.atlas.mongodb.com/reference/api/private-endpoints-endpoint-get-one/) Documentation for more information. diff --git a/docs/data-sources/third_party_integration.md b/docs/data-sources/third_party_integration.md index bd2c9c25aa..127382fec2 100644 --- a/docs/data-sources/third_party_integration.md +++ b/docs/data-sources/third_party_integration.md @@ -61,7 +61,6 @@ Additional values based on Type * `user_name` - Your Prometheus username. * `password` - Your Prometheus password. * `service_discovery` - Indicates which service discovery method is used, either file or http. 
- * `scheme` - Your Prometheus protocol scheme configured for requests. **Note** This attribute is deprecated as it is not being used. * `enabled` - Whether your cluster has Prometheus enabled. See [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Third-Party-Integrations/operation/createThirdPartyIntegration) Documentation for more information. diff --git a/docs/data-sources/third_party_integrations.md b/docs/data-sources/third_party_integrations.md index c177cc3490..5c0bc831b7 100644 --- a/docs/data-sources/third_party_integrations.md +++ b/docs/data-sources/third_party_integrations.md @@ -75,7 +75,6 @@ Additional values based on Type * `PROMETHEUS` * `user_name` - Your Prometheus username. * `service_discovery` - Indicates which service discovery method is used, either file or http. - * `scheme` - Your Prometheus protocol scheme configured for requests. **Note** This attribute is deprecated as it is not being used. * `enabled` - Whether your cluster has Prometheus enabled. See [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Third-Party-Integrations/operation/createThirdPartyIntegration) Documentation for more information. diff --git a/docs/guides/0.9.0-upgrade-guide.md b/docs/guides/0.9.0-upgrade-guide.md index 9337516895..172865819f 100644 --- a/docs/guides/0.9.0-upgrade-guide.md +++ b/docs/guides/0.9.0-upgrade-guide.md @@ -7,7 +7,7 @@ subcategory: "Older Guides" Besides the bug fixes, improvements and enhancements listed in the [CHANGELOG](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/CHANGELOG.md) for 0.9.0 we want to call out some specific features and enhancements added to this version: * Added support for LDAP configuration and database users -* Added two options to Cloud Provider Access to allow for both actions in a single apply **NOTE** [migration guide to Cloud Provider Access Setup](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/0.9.1-upgrade-guide#migration-to-cloud-provider-access-setup) +* Added `mongodbatlas_cloud_provider_access_setup` and `mongodbatlas_cloud_provider_access_authorization` resources for Cloud Provider Access to allow you to specify both actions in a single apply command. To learn more, see the [Migration Guide to Cloud Provider Access Setup](0.9.1-upgrade-guide#migration-to-cloud-provider-access-setup). * Apple Silicon (darwin/arm64) support * Added support for the GCP regions parameter for network containers * Added support for Custom DNS Configuration diff --git a/docs/guides/1.10.0-upgrade-guide.md b/docs/guides/1.10.0-upgrade-guide.md index a35cfaeee3..b123c6e366 100644 --- a/docs/guides/1.10.0-upgrade-guide.md +++ b/docs/guides/1.10.0-upgrade-guide.md @@ -21,7 +21,11 @@ The Terraform MongoDB Atlas Provider version 1.10.0 has a number of new and exci **Changes** -- [Programmatic API Key](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Programmatic-API-Keys) Resources and Data Sources have been updated to make easier and more intuitive to use ([`mongodbatlas_api_key`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/api_key), [`mongodbatlas_project_api_key`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/project_api_key) and [`mongodbatlas_project_ip_access_list_key`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/access_list_api_key)). 
We have created a dedicated [Programmatic API Key Upgrade Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0) to help you transition smoothly from the previous version which was first released in Terraform Provider for MongoDB Atlas in v1.8.0 to the new version in v1.10.0. +- [Programmatic API Key](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Programmatic-API-Keys) Updated the following resources and data sources to make them easier to use: + - [`mongodbatlas_api_key`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/api_key) + - [`mongodbatlas_project_api_key`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/project_api_key) + - [`mongodbatlas_project_ip_access_list_key`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/access_list_api_key) + - Created a dedicated [Migration Guide: Programmatic API Key](Programmatic-API-Key-upgrade-guide-1.10.0) to help you migrate from the previous version, which was released in Terraform Provider MongoDB Atlas in v1.8.0, to the new version in v1.10.0. **Deprecations and Removals:** diff --git a/docs/guides/1.16.0-upgrade-guide.md b/docs/guides/1.16.0-upgrade-guide.md index e93e7ddbb8..4fea02b650 100644 --- a/docs/guides/1.16.0-upgrade-guide.md +++ b/docs/guides/1.16.0-upgrade-guide.md @@ -16,7 +16,7 @@ There is no need to use the environment variable `MONGODB_ATLAS_ENABLE_PREVIEW` **Deprecations and Removals:** - Format of IdP Id that uniquely identifies the identity provider when importing [`mongodbatlas_federated_settings_identity_provider`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/federated_settings_identity_provider) resource and [`mongodbatlas_federated_settings_identity_provider`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/data-sources/federated_settings_identity_provider) data source only accepts the new IdP Id format that is a 24-hex characters long string. -More info can be found in the [previous upgrade guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.15.0-upgrade-guide). +To learn more, see the [1.15.0 Upgrade Guide](1.15.0-upgrade-guide). - Removal of `project_id` attribute in `mongodbatlas_project_api_key` resource. diff --git a/docs/guides/1.18.0-upgrade-guide.md b/docs/guides/1.18.0-upgrade-guide.md new file mode 100644 index 0000000000..99ea9e381e --- /dev/null +++ b/docs/guides/1.18.0-upgrade-guide.md @@ -0,0 +1,117 @@ +--- +page_title: "Upgrade Guide 1.18.0" +--- + +# MongoDB Atlas Provider 1.18.0: Upgrade and Information Guide + +***WARNING:*** For users using the `mongodbatlas_advanced_cluster` resource or data sources, and defining multi sharded clusters, this new version impacts the possibility of editing the definition of these clusters from the Atlas UI. This impact is limited to the first weeks of September 2024. + +The Terraform MongoDB Atlas Provider version 1.18.0 has a number of new and exciting features. + +**New Resources, Data Sources, and Features:** + +- You can now scale the instance size and disk IOPS independently for each individual shard for sharded and geo-sharded clusters defined with `mongodbatlas_advanced_cluster`. To learn more, see the [Advanced Cluster New Sharding Configurations Migration Guide](advanced-cluster-new-sharding-schema). 
As part of these changes, two new attributes are added: + - Use the `replication_specs.*.zone_id` attribute in the `mongodbatlas_advanced_cluster` resource and data sources to identify the zone of each `replication_specs` object. + - Use the `use_replication_spec_per_shard` attribute in the `mongodbatlas_advanced_cluster` data sources to specify whether to obtain `replication_specs` objects for each shard. + +- The cloud backup schedule for a cluster can now be used to configure copy settings by zone, instead of by the replication object in each zone, with the `mongodbatlas_cloud_backup_schedule` resource and data sources. This feature also enables you to reference cluster zones using independent shard scaling, which no longer supports `replication_spec.*.id`. + - `copy_settings.*.zone_id`: Users should update their configurations to use this attribute instead of `copy_settings.*.replication_spec_id`. To learn more, [see below](#transition-cloud-backup-schedules-for-clusters-to-use-zones). + +**Deprecations and removals:** + +- Deprecations in `mongodbatlas_advanced_cluster` resource and data sources: + - `replication_specs.*.num_shards`: The `replication_specs` list now supports defining an object for each individual shard. Use this new schema instead of the `num_shards` attribute. To learn more, see the [Migration Guide](advanced-cluster-new-sharding-schema). + - `disk_size_gb`: The same attribute is now defined under `replication_specs.*.region_configs.*.(electable_specs|analytics_specs|read_only_specs).disk_size_gb`. Replacing this value doesn't affect the underlying cluster. This change in the value's location aligns this value with the updated API schema and allows for future independent storage size scaling. + - `replication_specs.*.id`: This attribute was previously used by `mongodbatlas_cloud_backup_schedule` resource to identify cluster zones. As of 1.18.0, `mongodbatlas_cloud_backup_schedule` resource can reference cluster zones using the new `zone_id` attribute. + - `advanced_configuration.default_read_concern`: MongoDB 5.0 and later clusters default to `local`. To use a custom read concern level, please refer to your driver documentation. + - `advanced_configuration.fail_index_key_too_long`: This attribute only applies to older versions of MongoDB (removed in 4.4). + +- Deprecations in `mongodbatlas_cloud_backup_schedule` resource and data source: + - `copy_settings.*.replication_spec_id`: Users should use `copy_settings.#.zone_id` instead. This also enables you to reference cluster zones using independent shard scaling, which no longer supports `replication_spec.*.id`. + - In the `mongodbatlas_cloud_backup_schedule` data source, users also need to set `use_zone_id_for_copy_settings = true` to allow the data source to use the latest schema that populates `copy_settings.#.zone_id` instead of the deprecated `copy_settings.#.replication_spec_id`. + + +- Attribute `scheme` removed from `mongodbatlas_third_party_integration` resource and data sources. +- Attributes `page_num` and `items_per_page` removed from `federated_settings_identity_providers` data source. +- Attribute `id` changed from optional to computed only in `mongodbatlas_cloud_backup_snapshot_export_bucket` data source. +- Attribute `id` changed from optional to computed only in `mongodbatlas_cloud_backup_snapshot_export_job` data source. +- Attribute `created_at` removed from `mongodbatlas_cloud_backup_snapshot_restore_job` resource and data sources. 
+- Attribute `job_id` removed from `mongodbatlas_cloud_backup_snapshot_restore_job` in favour of `snapshot_restore_job_id`. +- Attribute `endpoints.*.service_attachment_name` removed from `mongodbatlas_privatelink_endpoint_service` resource and data source. + + +## Transition Cloud Backup Schedules for Clusters to use zones + +### Update `mongodbatlas_cloud_backup_schedule` resource: + +**Step 1:** Update `copy_settings` in your Terraform configuration and replace usage of `replication_spec_id` with `zone_id`. To find appropriate value for `zone_id`, you can consult the replicationSpecs array returned from [Return One Multi-Cloud Cluster in One Project](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Clusters/operation/getCluster). Alternately, use `mongodbatlas_advanced_cluster` data source or resource and reference `replication_specs.#.zone_id`. + +#### Example 1: Old configuration (`mongodbatlas_cloud_backup_schedule`) +``` +resource "mongodbatlas_cloud_backup_schedule" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + + # other attributes... + + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY"] + region_name = "US_EAST_1" + replication_spec_id = mongodbatlas_advanced_cluster.my_cluster.replication_specs.*.id[0] # replace with zone_id + should_copy_oplogs = false + } +} +``` + +#### Example 2: Updated configuration (`mongodbatlas_cloud_backup_schedule`) +``` +resource "mongodbatlas_cloud_backup_schedule" "test" { + project_id = mongodbatlas_advanced_cluster.my_cluster.project_id + cluster_name = mongodbatlas_advanced_cluster.my_cluster.name + + # other attributes... + + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY"] + region_name = "US_EAST_1" + zone_id = mongodbatlas_advanced_cluster.my_cluster.replication_specs.*.zone_id[0] + should_copy_oplogs = false + } +} +``` + + +**Step 2:** In the terminal, run `terraform plan`. You should see a plan generated with an "update in-place" for the `mongodbatlas_cloud_backup_schedule` resource proposing to add the `zone_id` under `copy_settings` as expected. + +**Step 3:** Run `terraform apply`. + +### Update `mongodbatlas_cloud_backup_schedule` data source: + +**Step 1:** Add argument `use_zone_id_for_copy_settings = true` to the data source (with this change the data source will no longer populate `copy_settings.#.replication_spec_id`). The resulting data source should look like below: + +``` +data "mongodbatlas_cloud_backup_schedule" "test" { + project_id = var.project_id + cluster_name = var.cluster_name + use_zone_id_for_copy_settings = true +} +``` + +**Step 2:** Replace any references to `copy_settings.#.replication_spec_id` in your configurations with `copy_settings.#.zone_id`. + +**Step 3:** Run `terraform plan` followed by `terraform apply`. + + +### Helpful Links + +* [Report bugs](https://github.com/mongodb/terraform-provider-mongodbatlas/issues) + +* [Request Features](https://feedback.mongodb.com/forums/924145-atlas?category_id=370723) + +* [Contact Support](https://docs.atlas.mongodb.com/support/) covered by MongoDB Atlas support plans, Developer and above. 
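As a complement to Step 1 of the resource transition above, the following is a minimal sketch of looking up `zone_id` values through the `mongodbatlas_advanced_cluster` data source when the cluster is not managed in the same configuration; `var.project_id` and `var.cluster_name` are placeholders for your own values:

```
# Read the cluster to obtain one replication spec (and zone_id) per shard.
data "mongodbatlas_advanced_cluster" "this" {
  project_id                     = var.project_id
  name                           = var.cluster_name
  use_replication_spec_per_shard = true
}

resource "mongodbatlas_cloud_backup_schedule" "test" {
  project_id   = var.project_id
  cluster_name = var.cluster_name

  # other attributes...

  copy_settings {
    cloud_provider     = "AWS"
    frequencies        = ["HOURLY", "DAILY", "WEEKLY"]
    region_name        = "US_EAST_1"
    zone_id            = data.mongodbatlas_advanced_cluster.this.replication_specs[0].zone_id
    should_copy_oplogs = false
  }
}
```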
diff --git a/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md b/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md index eec249e566..d48746e2dc 100644 --- a/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md +++ b/docs/guides/Programmatic-API-Key-upgrade-guide-1.10.0.md @@ -1,12 +1,14 @@ --- -page_title: "Upgrade Guide 1.10.0 for Programmatic API Key" +page_title: "Migration Guide: Programmatic API Key (v1.10.0)" subcategory: "Older Guides" --- -# MongoDB Atlas Provider: Programmatic API Key Upgrade Guide in v1.10.0 -In Terraform MongoDB Atlas Provider v1.10.0, we've focused on refining the management of MongoDB Atlas Programmatic API Keys (PAK) for enhanced sustainability and ease of use over the long term. Hence, rather than embedding the PAK project assignment lifecycle in the `mongodbatlas_project` resource, as done previously, we've implemented a more efficient and intuitive assignment process by including in `mongodbatlas_project_api_key` resource. This guide aims to provide you with a smooth transition from the initial release version of PAK management in v1.8.0 to the new version as part of v1.10.0. +# Migration Guide: Programmatic API Key (v1.10.0) +In Terraform MongoDB Atlas Provider v1.10.0, we improved MongoDB Atlas Programmatic API Keys (PAK) management. -For comprehensive Upgrade Guide on all v1.10.0 modifications see [here](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.10.0-upgrade-guide). +Previously, you embedded the PAK project assignment lifecycle in the `mongodbatlas_project` resource. In this release, we added a new resource, `mongodbatlas_project_api_key`. Use this guide to start using this new PAK resource. + +To learn more, see the [1.10.0 Upgrade Guide](1.10.0-upgrade-guide). Remember, your scripts will still work with deprecated features for now, but it's best to upgrade as soon as possible to benefit from the latest enhancements. Code removal is planned for v1.12.0 at which point prior PAK workflow will no longer function. diff --git a/docs/guides/advanced-cluster-new-sharding-schema.md b/docs/guides/advanced-cluster-new-sharding-schema.md new file mode 100644 index 0000000000..994cf3aaff --- /dev/null +++ b/docs/guides/advanced-cluster-new-sharding-schema.md @@ -0,0 +1,356 @@ +--- +page_title: "Migration Guide: Advanced Cluster New Sharding Configurations" +--- + +# Migration Guide: Advanced Cluster New Sharding Configurations + +**Objective**: Use this guide to migrate your existing `advanced_cluster` resources to support new sharding configurations introduced in version 1.18.0. The new sharding configurations allow you to scale shards independently. Existing sharding configurations continue to work, but you will receive deprecation messages if you continue to use them. + +- [Migration Guide: Advanced Cluster New Sharding Configurations](#migration-guide-advanced-cluster-new-sharding-configurations) + - [Changes Overview](#changes-overview) + - [Migrate advanced\_cluster type `SHARDED`](#migrate-advanced_cluster-type-sharded) + - [Migrate advanced\_cluster type `GEOSHARDED`](#migrate-advanced_cluster-type-geosharded) + - [Migrate advanced\_cluster type `REPLICASET`](#migrate-advanced_cluster-type-replicaset) + - [Use Independent Shard Scaling](#use-independent-shard-scaling) + + +## Changes Overview + +The `replication_specs` attribute now represents each individual shard of the cluster with a unique replication spec element. 
+When you use the new sharding configurations, it will no longer use the existing attribute `num_shards`, and instead the number of shards are defined by the number of `replication_specs` elements. + + +### Migrate advanced_cluster type `SHARDED` + +Consider the following configuration of a `SHARDED` cluster using the deprecated `num_shards`: +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "SymmetricShardedCluster" + cluster_type = "SHARDED" + + replication_specs { + # deprecation warning will be encoutered for using num_shards + num_shards = 2 + region_configs { + electable_specs { + instance_size = "M30" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} +``` + +In order to use our new sharding configurations, we will remove the use of `num_shards` and add a new identical `replication_specs` element for each shard. Note that these 2 changes must be done at the same time. + +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "SymmetricShardedCluster" + cluster_type = "SHARDED" + + replication_specs { # first shard + region_configs { + electable_specs { + instance_size = "M30" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # second shard + region_configs { + electable_specs { + instance_size = "M30" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} +``` + +This updated configuration will trigger a Terraform update plan. However, the underlying cluster will not face any changes after the `apply` command, as both configurations represent a sharded cluster composed of two shards. + +Note: The first time `terraform apply` command is run **after** updating the configuration, you may receive a `500 Internal Server Error (Error code: "SERVICE_UNAVAILABLE")` error. This is a known temporary issue. If you encounter this, please re-run `terraform apply` and this time the update should succeed. + + +### Migrate advanced_cluster type `GEOSHARDED` + +Consider the following configuration of a `GEOSHARDED` cluster using the deprecated `num_shards`: + +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "GeoShardedCluster" + cluster_type = "GEOSHARDED" + + replication_specs { + zone_name = "zone n1" + num_shards = 2 + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + + replication_specs { + zone_name = "zone n2" + num_shards = 2 + + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} +``` + +In order to use our new sharding configurations, we will remove the use of `num_shards` and add a new identical `replication_specs` element for each shard. Note that these two changes must be done at the same time. 
+ +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "GeoShardedCluster" + cluster_type = "GEOSHARDED" + + replication_specs { # first shard for zone n1 + zone_name = "zone n1" + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + + replication_specs { # second shard for zone n1 + zone_name = "zone n1" + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + + replication_specs { # first shard for zone n2 + zone_name = "zone n2" + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # second shard for zone n2 + zone_name = "zone n2" + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} +``` + + + +This updated configuration triggers a Terraform update plan. However, the underlying cluster will not face any changes after the `apply` command, as both configurations represent a geo sharded cluster with two zones and two shards in each one. + +Note: The first time `terraform apply` command is run **after** updating the configuration, you may receive a `500 Internal Server Error (Error code: "SERVICE_UNAVAILABLE")` error. This is a known temporary issue. If you encounter this, please re-run `terraform apply` and this time the update should succeed. + + +### Migrate advanced_cluster type `REPLICASET` + +To learn more, see the documentation on [transitioning from a replicaset to a sharded cluster](https://www.mongodb.com/docs/atlas/scale-cluster/#convert-a-replica-set-to-a-sharded-cluster). + +Consider the following replica set configuration: +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "ReplicaSetTransition" + cluster_type = "REPLICASET" + + replication_specs { + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AZURE" + priority = 7 + region_name = "US_EAST" + } + } +} +``` + +To transition a replica set to sharded cluster 2 separate updates must be applied. First, update the `cluster_type` to SHARDED, and apply this change to the cluster. 
+ +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "ReplicaSetTransition" + cluster_type = "SHARDED" + + replication_specs { + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AZURE" + priority = 7 + region_name = "US_EAST" + } + } +} +``` + +Once the cluster type is adjusted accordingly, we can proceed to add a new shard: + +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "ReplicaSetTransition" + cluster_type = "SHARDED" + + replication_specs { # first shard + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AZURE" + priority = 7 + region_name = "US_EAST" + } + } + + replication_specs { # second shard + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AZURE" + priority = 7 + region_name = "US_EAST" + } + } +} +``` + +Note: The first time `terraform apply` command is run **after** updating the configuration, you may receive a `500 Internal Server Error (Error code: "SERVICE_UNAVAILABLE")` error. This is a known temporary issue. If you encounter this, please re-run `terraform apply` and this time the update should succeed. + + +### Use Independent Shard Scaling + +Use the new sharding configurations. Each shard must be represented with a unique `replication_specs` element and `num_shards` must not be used, as illustrated in the following example. + +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "ShardedCluster" + cluster_type = "SHARDED" + + replication_specs { # first shard + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # second shard + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} +``` + +With each shard's `replication_specs` defined independently, we can now define distinct `instance_size`, and `disk_iops` (only for AWS) values for each shard in the cluster. In the following example, we define an upgraded instance size of M40 only for the first shard in the cluster. + +Consider reviewing the Metrics Dashboard in the MongoDB Atlas UI (e.g. https://cloud.mongodb.com/v2/#/clusters/detail/ShardedCluster) for insight into how each shard within your cluster is currently performing, which will inform any shard-specific resource allocation changes you might require. + +``` +resource "mongodbatlas_advanced_cluster" "test" { + project_id = var.project_id + name = "ShardedCluster" + cluster_type = "SHARDED" + + replication_specs { # first shard upgraded to M40 + region_configs { + electable_specs { + instance_size = "M40" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # second shard preserves M30 + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } +} +``` + +-> **NOTE:** For any cluster leveraging the new sharding configurations and defining independently scaled shards, users should also update corresponding `mongodbatlas_cloud_backup_schedule` resource & data sources. 
This involves updating any existing Terraform configurations of the resource to use `copy_settings.#.zone_id` instead of `copy_settings.#.replication_spec_id`. This is needed as `mongodbatlas_advanced_cluster` resource and data source will no longer have `replication_specs.#.id` present when shards are scaled independently. To learn more, review the [1.18.0 Migration Guide](1.18.0-upgrade-guide.md#transition-cloud-backup-schedules-for-clusters-to-use-zones). diff --git a/docs/guides/cluster-to-advanced-cluster-migration-guide.md b/docs/guides/cluster-to-advanced-cluster-migration-guide.md new file mode 100644 index 0000000000..ccade2d6dc --- /dev/null +++ b/docs/guides/cluster-to-advanced-cluster-migration-guide.md @@ -0,0 +1,181 @@ +--- +page_title: "Migration Guide: Cluster to Advanced Cluster" +--- + +# Migration Guide: Cluster to Advanced Cluster + +**Objective**: This guide explains how to replace the `mongodbatlas_cluster` resource with the `mongodbatlas_advanced_cluster` resource. The data source(s) migration only requires [output changes](#output-changes) as data sources only read clusters. + +## Main Changes Between `mongodbatlas_cluster` and `mongodbatlas_advanced_cluster` + +1. Replication Spec Configuration: Supports different node types (electable, analytics, read_only) where hardware configuration can differ between node types. +2. Provider Settings: Moved from the top level to the replication spec allowing you to create multi-cloud clusters. +3. Auto Scaling: Moved from the top level to the replication spec allowing you to scale replication specs individually. +4. Backup Configuration: Renamed from `cloud_backup` to `backup_enabled`. +5. See the [Migration Guide: Advanced Cluster New Sharding Configurations](advanced-cluster-new-sharding-schema#migration-sharded) for changes to `num_shards` and the new `zone_id`. 
+ +### Example 1: Old Configuration (`mongodbatlas_cluster`) + +```terraform +resource "mongodbatlas_cluster" "this" { + project_id = var.project_id + name = "legacy-cluster" + cluster_type = "REPLICASET" + + provider_instance_size_name = "M10" # 1 Replication Spec Configuration + provider_name = "AWS" # 2 Provider Settings + + auto_scaling_disk_gb_enabled = true # 3 Auto Scaling + cloud_backup = true # 4 Backup Configuration + + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_EAST_1" + priority = 7 + electable_nodes = 3 # 1 Replication Spec Configuration + analytics_nodes = 1 # 1 Replication Spec Configuration + read_only_nodes = 0 # 1 Replication Spec Configuration + } + } +} +``` + +### Example 2: New Configuration (`mongodbatlas_advanced_cluster`) + +```terraform +resource "mongodbatlas_advanced_cluster" "this" { + project_id = var.project_id + name = "advanced-cluster" + cluster_type = "REPLICASET" + backup_enabled = true # 4 Backup Configuration + + replication_specs { + region_configs { + auto_scaling { # 3 Auto Scaling + disk_gb_enabled = true + } + region_name = "US_EAST_1" + priority = 7 + provider_name = "AWS" # 2 Provider Settings + + electable_specs { # 1 Replication Spec Configuration + instance_size = "M10" + node_count = 3 + } + analytics_specs { # 1 Replication Spec Configuration + instance_size = "M10" + node_count = 1 + } + } + } +} +``` + +### Output Changes + +- `container_id`: + - Before: `mongodbatlas_cluster.this.replication_specs[0].container_id` was a flat string, such as: `669644ae01bf814e3d25b963` + - After: `mongodbatlas_advanced_cluster.this.replication_specs[0].container_id` is a map, such as: `{"AWS:US_EAST_1": "669644ae01bf814e3d25b963"}` + - If you have a single region you can access the `container_id` directly with: `one(values(mongodbatlas_advanced_cluster.this.replication_specs[0].container_id))` + +## Best Practices Before Migrating +Before doing any migration create a backup of your [Terraform state file](https://developer.hashicorp.com/terraform/cli/commands/state). + +## Migration using `terraform plan -generate-config-out=adv_cluster.tf` +This method uses only [Terraform native tools](https://developer.hashicorp.com/terraform/language/import/generating-configuration) and is ideal if you: +1. Have an existing cluster without any Terraform configuration and want to manage your cluster with Terraform. +2. Have existing `mongodbatlas_cluster` resource(s) and don't want to use an external script for migrating. + +### Procedure + +1. Find the import IDs of the clusters you want to migrate: `{PROJECT_ID}-{CLUSTER_NAME}`, such as `664619d870c247237f4b86a6-legacy-cluster` +2. Add an import block per cluster to one of your `.tf` files: + ```terraform + import { + to = mongodbatlas_advanced_cluster.this + id = "664619d870c247237f4b86a6-legacy-cluster" # from step 1 + } + ``` +3. Run `terraform plan -generate-config-out=adv_cluster.tf`. This should generate a `adv_cluster.tf` file and display a message similar to `Plan: 1 to import, 0 to add, 0 to change, 0 to destroy`: + ```terraform + resource "mongodbatlas_advanced_cluster" "this" { + # ... most attributes are removed for readability of this guide + # .... 
+ backup_enabled = true + cluster_type = "REPLICASET" + disk_size_gb = 10 + name = "legacy-cluster" + project_id = "664619d870c247237f4b86a6" + state_name = "IDLE" + termination_protection_enabled = false + version_release_system = "LTS" + + advanced_configuration { + default_read_concern = null + default_write_concern = null + fail_index_key_too_long = false + javascript_enabled = true + minimum_enabled_tls_protocol = "TLS1_2" + no_table_scan = false + oplog_min_retention_hours = 0 + oplog_size_mb = 0 + sample_refresh_interval_bi_connector = 0 + sample_size_bi_connector = 0 + transaction_lifetime_limit_seconds = 0 + } + + replication_specs { + container_id = { + "AWS:US_EAST_1" = "669644ae01bf814e3d25b963" + } + id = "66978026668b7619f6f48cf2" + zone_name = "ZoneName managed by Terraform" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_EAST_1" + + auto_scaling { + compute_enabled = false + compute_max_instance_size = null + compute_min_instance_size = null + compute_scale_down_enabled = false + disk_gb_enabled = false + } + + electable_specs { + disk_iops = 3000 + ebs_volume_type = null + instance_size = "M10" + node_count = 3 + } + analytics_specs { + disk_iops = 3000 + ebs_volume_type = null + instance_size = "M10" + node_count = 1 + } + } + } + } + ``` +4. Run `terraform apply`. You should see the resource(s) imported: `Apply complete! Resources: 1 imported, 0 added, 0 changed, 0 destroyed.` +5. Remove the "default" fields. Many fields of this resource are optional. Look for fields with a `null` or `0` value or blocks you didn't specify before, for example: + - `advanced_configuration` + - `connection_strings` + - `cluster_id` + - `bi_connector_config` +6. Re-use existing [Terraform expressions](https://developer.hashicorp.com/terraform/language/expressions). All fields in the generated configuration will have static values. Look in your previous configuration for: + - variables, for example: `var.project_id` + - Terraform keywords, for example: `for_each`, `count`, and `depends_on` +7. Re-run `terraform apply` to ensure you have no planned changes: `No changes. Your infrastructure matches the configuration.` +8. Update the references from your previous cluster resource: `mongodbatlas_cluster.this.XXXX` to the new `mongodbatlas_advanced_cluster.this.XXX`. + - Double check [output-changes](#output-changes) to ensure the underlying configuration stays unchanged. +9. Replace your existing clusters with the ones from `adv_cluster.tf` and run `terraform state rm mongodbatlas_cluster.this`. Without this step, Terraform will create a plan to delete your existing cluster. +1. Remove the import block created in step 2. +2. Re-run `terraform apply` to ensure you have no planned changes: `No changes. Your infrastructure matches the configuration.` + +### Terraform Actions +Using the `project_id` and `cluster.name`, Terraform imports your cluster and uses the new `mongodbatlas_advanced_cluster` schema to generate a configuration file. This file includes all configurable values in the schema, but none of the previous configuration defined for your `mongodbatlas_cluster`. 
Therefore, the new configuration will likely be a lot more verbose and contain none of your original [Terraform expressions.](https://developer.hashicorp.com/terraform/language/expressions) diff --git a/docs/index.md b/docs/index.md index 06f666ecc1..b2d5124fc2 100644 --- a/docs/index.md +++ b/docs/index.md @@ -219,7 +219,7 @@ We ship binaries but do not prioritize fixes for the following operating system ## Examples from MongoDB and the Community -We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.17.6/examples) +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.18.1/examples) in our GitHub repo that will help both beginner and more advanced users. Have a good example you've created and want to share? diff --git a/docs/resources/advanced_cluster.md b/docs/resources/advanced_cluster.md index e4eff9132a..2d2d534ec8 100644 --- a/docs/resources/advanced_cluster.md +++ b/docs/resources/advanced_cluster.md @@ -18,7 +18,7 @@ More information on considerations for using advanced clusters please see [Consi -> **NOTE:** To enable Cluster Extended Storage Sizes use the `is_extended_storage_sizes_enabled` parameter in the [mongodbatlas_project resource](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/project). --> **NOTE:** The Low-CPU instance clusters are prefixed with `R`, i.e. `R40`. For complete list of Low-CPU instance clusters see Cluster Configuration Options under each Cloud Provider (https://www.mongodb.com/docs/atlas/reference/cloud-providers/). +-> **NOTE:** The Low-CPU instance clusters are prefixed with `R`, for example `R40`. For complete list of Low-CPU instance clusters see Cluster Configuration Options under each Cloud Provider (https://www.mongodb.com/docs/atlas/reference/cloud-providers/). 
## Example Usage @@ -125,7 +125,7 @@ resource "mongodbatlas_advanced_cluster" "test" { } } ``` -### Example of a Multi-Cloud Cluster +### Example of a Multi Cloud Sharded Cluster with 2 shards ```terraform resource "mongodbatlas_advanced_cluster" "cluster" { @@ -134,61 +134,59 @@ resource "mongodbatlas_advanced_cluster" "cluster" { cluster_type = "SHARDED" backup_enabled = true - replication_specs { - num_shards = 3 - - region_configs { # shard n1 + replication_specs { # shard 1 + region_configs { electable_specs { instance_size = "M10" node_count = 3 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AWS" priority = 7 region_name = "US_EAST_1" } - region_configs { # shard n2 + region_configs { electable_specs { instance_size = "M10" node_count = 2 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AZURE" priority = 6 region_name = "US_EAST_2" } + } - region_configs { # shard n3 + replication_specs { # shard 2 + region_configs { electable_specs { instance_size = "M10" - node_count = 2 + node_count = 3 } - analytics_specs { + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + + region_configs { + electable_specs { instance_size = "M10" - node_count = 1 + node_count = 2 } - provider_name = "GCP" - priority = 5 - region_name = "US_EAST_4" + provider_name = "AZURE" + priority = 6 + region_name = "US_EAST_2" } - } + } advanced_configuration { javascript_enabled = true - oplog_size_mb = 30 + oplog_size_mb = 991 sample_refresh_interval_bi_connector = 300 } } ``` -### Example of a Global Cluster +### Example of a Global Cluster with 2 zones ```terraform resource "mongodbatlas_advanced_cluster" "cluster" { project_id = mongodbatlas_project.project.id @@ -196,97 +194,99 @@ resource "mongodbatlas_advanced_cluster" "cluster" { cluster_type = "GEOSHARDED" backup_enabled = true - replication_specs { # zone n1 + replication_specs { # shard 1 - zone n1 zone_name = "zone n1" - num_shards = 3 # 3-shard Multi-Cloud Cluster - region_configs { # shard n1 + region_configs { electable_specs { instance_size = "M10" node_count = 3 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AWS" priority = 7 region_name = "US_EAST_1" } - region_configs { # shard n2 + region_configs { electable_specs { instance_size = "M10" node_count = 2 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AZURE" priority = 6 region_name = "US_EAST_2" } + } + + replication_specs { # shard 2 - zone n1 + zone_name = "zone n1" - region_configs { # shard n3 + region_configs { electable_specs { instance_size = "M10" - node_count = 2 + node_count = 3 } - analytics_specs { + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + + region_configs { + electable_specs { instance_size = "M10" - node_count = 1 + node_count = 2 } - provider_name = "GCP" - priority = 5 - region_name = "US_EAST_4" + provider_name = "AZURE" + priority = 6 + region_name = "US_EAST_2" } } - replication_specs { # zone n2 + replication_specs { # shard 1 - zone n2 zone_name = "zone n2" - num_shards = 2 # 2-shard Multi-Cloud Cluster - region_configs { # shard n1 + region_configs { electable_specs { instance_size = "M10" node_count = 3 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AWS" priority = 7 region_name = "EU_WEST_1" } - region_configs { # shard n2 + region_configs { electable_specs { instance_size = "M10" node_count = 2 } - analytics_specs { - instance_size = "M10" - 
node_count = 1 - } provider_name = "AZURE" priority = 6 region_name = "EUROPE_NORTH" } + } - region_configs { # shard n3 + replication_specs { # shard 2 - zone n2 + zone_name = "zone n2" + + region_configs { electable_specs { instance_size = "M10" - node_count = 2 + node_count = 3 } - analytics_specs { + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + + region_configs { + electable_specs { instance_size = "M10" - node_count = 1 + node_count = 2 } - provider_name = "GCP" - priority = 5 - region_name = "US_EAST_4" + provider_name = "AZURE" + priority = 6 + region_name = "EUROPE_NORTH" } } @@ -377,13 +377,13 @@ This parameter defaults to false. - `SHARDED` Sharded cluster - `GEOSHARDED` Global Cluster -* `disk_size_gb` - (Optional) Capacity, in gigabytes, of the host's root volume. Increase this number to add capacity, up to a maximum possible value of 4096 (i.e., 4 TB). This value must be a positive number. You can't set this value with clusters with local [NVMe SSDs](https://docs.atlas.mongodb.com/cluster-tier/#std-label-nvme-storage). The minimum disk size for dedicated clusters is 10 GB for AWS and GCP. If you specify diskSizeGB with a lower disk size, Atlas defaults to the minimum disk size value. If your cluster includes Azure nodes, this value must correspond to an existing Azure disk type (8, 16, 32, 64, 128, 256, 512, 1024, 2048, or 4095)Atlas calculates storage charges differently depending on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require additional storage space beyond this limitation, consider [upgrading your cluster](https://docs.atlas.mongodb.com/scale-cluster/#std-label-scale-cluster-instance) to a higher tier. If your cluster spans cloud service providers, this value defaults to the minimum default of the providers involved. +* `disk_size_gb` - (Optional) Capacity, in gigabytes, of the host's root volume. Increase this number to add capacity, up to a maximum possible value of 4096 (4 TB). This value must be a positive number. You can't set this value with clusters with local [NVMe SSDs](https://docs.atlas.mongodb.com/cluster-tier/#std-label-nvme-storage). The minimum disk size for dedicated clusters is 10 GB for AWS and GCP. If you specify diskSizeGB with a lower disk size, Atlas defaults to the minimum disk size value. If your cluster includes Azure nodes, this value must correspond to an existing Azure disk type (8, 16, 32, 64, 128, 256, 512, 1024, 2048, or 4095)Atlas calculates storage charges differently depending on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require additional storage space beyond this limitation, consider [upgrading your cluster](https://docs.atlas.mongodb.com/scale-cluster/#std-label-scale-cluster-instance) to a higher tier. If your cluster spans cloud service providers, this value defaults to the minimum default of the providers involved. **(DEPRECATED)** Use `replication_specs.#.region_config.#.(analytics_specs|electable_specs|read_only_specs).disk_size_gb` instead. To learn more, see the [1.18.0 upgrade guide](../guides/1.18.0-upgrade-guide.html.markdown). * `encryption_at_rest_provider` - (Optional) Possible values are AWS, GCP, AZURE or NONE. 
Only needed if you desire to manage the keys, see [Encryption at Rest using Customer Key Management](https://docs.atlas.mongodb.com/security-kms-encryption/) for complete documentation. You must configure encryption at rest for the Atlas project before enabling it on any cluster in the project. For Documentation, see [AWS](https://docs.atlas.mongodb.com/security-aws-kms/), [GCP](https://docs.atlas.mongodb.com/security-kms-encryption/) and [Azure](https://docs.atlas.mongodb.com/security-azure-kms/#std-label-security-azure-kms). Requirements are if `replication_specs.#.region_configs.#.Specs.instance_size` is M10 or greater and `backup_enabled` is false or omitted. * `tags` - (Optional) Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#tags). * `labels` - (Optional) Set that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster. See [below](#labels). **DEPRECATED** Use `tags` instead. * `mongo_db_major_version` - (Optional) Version of the cluster to deploy. Atlas supports the following MongoDB versions for M10+ clusters: `4.4`, `5.0`, `6.0` or `7.0`. If omitted, Atlas deploys a cluster that runs MongoDB 7.0. If `replication_specs#.region_configs#.Specs.instance_size`: `M0`, `M2` or `M5`, Atlas deploys MongoDB 4.4. Atlas always deploys the cluster with the latest stable release of the specified version. If you set a value to this parameter and set `version_release_system` `CONTINUOUS`, the resource returns an error. Either clear this parameter or set `version_release_system`: `LTS`. * `pit_enabled` - (Optional) - Flag that indicates if the cluster uses Continuous Cloud Backup. -* `replication_specs` - Configuration for cluster regions and the hardware provisioned in them. See [below](#replication_specs) +* `replication_specs` - List of settings that configure your cluster regions. This attribute has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations. If for each replication_spec `num_shards` is configured with a value greater than 1 (using deprecated sharding configurations), then each object represents a zone with one or more shards. See [below](#replication_specs) * `root_cert_type` - (Optional) - Certificate Authority that MongoDB Atlas clusters use. You can specify ISRGROOTX1 (for ISRG Root X1). * `termination_protection_enabled` - Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster. * `version_release_system` - (Optional) - Release cadence that Atlas uses for this cluster. This parameter defaults to `LTS`. If you set this field to `CONTINUOUS`, you must omit the `mongo_db_major_version` field. Atlas accepts: @@ -438,9 +438,9 @@ Include **desired options** within advanced_configuration: } ``` -* `default_read_concern` - (Optional) [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/). +* `default_read_concern` - (Optional) [Default level of acknowledgment requested from MongoDB for read operations](https://docs.mongodb.com/manual/reference/read-concern/) set for this cluster. 
MongoDB 4.4 clusters default to [available](https://docs.mongodb.com/manual/reference/read-concern-available/). **(DEPRECATED)** MongoDB 5.0 and later clusters default to `local`. To use a custom read concern level, please refer to your driver documentation. * `default_write_concern` - (Optional) [Default level of acknowledgment requested from MongoDB for write operations](https://docs.mongodb.com/manual/reference/write-concern/) set for this cluster. MongoDB 4.4 clusters default to [1](https://docs.mongodb.com/manual/reference/write-concern/). -* `fail_index_key_too_long` - (Optional) When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. +* `fail_index_key_too_long` - (Optional) When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. **(DEPRECATED)** This parameter has been removed as of [MongoDB 4.4](https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.failIndexKeyTooLong). * `javascript_enabled` - (Optional) When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations. * `minimum_enabled_tls_protocol` - (Optional) Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections.Valid values are: @@ -531,16 +531,17 @@ replication_specs { ``` * `num_shards` - (Optional) Provide this value if you set a `cluster_type` of SHARDED or GEOSHARDED. Omit this value if you selected a `cluster_type` of REPLICASET. This API resource accepts 1 through 50, inclusive. This parameter defaults to 1. If you specify a `num_shards` value of 1 and a `cluster_type` of SHARDED, Atlas deploys a single-shard [sharded cluster](https://docs.atlas.mongodb.com/reference/glossary/#std-term-sharded-cluster). Don't create a sharded cluster with a single shard for production environments. Single-shard sharded clusters don't provide the same benefits as multi-shard configurations. -If you are upgrading a replica set to a sharded cluster, you cannot increase the number of shards in the same update request. You should wait until after the cluster has completed upgrading to sharded and you have reconnected all application clients to the MongoDB router before adding additional shards. Otherwise, your data might become inconsistent once MongoDB Cloud begins distributing data across shards. To learn more, see [Convert a replica set to a sharded cluster documentation](https://www.mongodb.com/docs/atlas/scale-cluster/#convert-a-replica-set-to-a-sharded-cluster) and [Convert a replica set to a sharded cluster tutorial](https://www.mongodb.com/docs/upcoming/tutorial/convert-replica-set-to-replicated-shard-cluster). +If you are upgrading a replica set to a sharded cluster, you cannot increase the number of shards in the same update request. You should wait until after the cluster has completed upgrading to sharded and you have reconnected all application clients to the MongoDB router before adding additional shards. Otherwise, your data might become inconsistent once MongoDB Cloud begins distributing data across shards. 
To learn more, see [Convert a replica set to a sharded cluster documentation](https://www.mongodb.com/docs/atlas/scale-cluster/#convert-a-replica-set-to-a-sharded-cluster) and [Convert a replica set to a sharded cluster tutorial](https://www.mongodb.com/docs/upcoming/tutorial/convert-replica-set-to-replicated-shard-cluster). **(DEPRECATED)** To learn more, see the [1.18.0 Upgrade Guide](../guides/1.18.0-upgrade-guide.html.markdown). * `region_configs` - (Optional) Configuration for the hardware specifications for nodes set for a given regionEach `region_configs` object describes the region's priority in elections and the number and type of MongoDB nodes that Atlas deploys to the region. Each `region_configs` object must have either an `analytics_specs` object, `electable_specs` object, or `read_only_specs` object. See [below](#region_configs) * `zone_name` - (Optional) Name for the zone in a Global Cluster. +* `zone_id` - Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. If clusterType is GEOSHARDED, this value indicates the zone that the given shard belongs to and can be used to configure Global Cluster backup policies. ### region_configs * `analytics_specs` - (Optional) Hardware specifications for [analytics nodes](https://docs.atlas.mongodb.com/reference/faq/deployment/#std-label-analytics-nodes-overview) needed in the region. Analytics nodes handle analytic data such as reporting queries from BI Connector for Atlas. Analytics nodes are read-only and can never become the [primary](https://docs.atlas.mongodb.com/reference/glossary/#std-term-primary). If you don't specify this parameter, no analytics nodes deploy to this region. See [below](#specs) -* `auto_scaling` - (Optional) Configuration for the Collection of settings that configures auto-scaling information for the cluster. The values for the `auto_scaling` parameter must be the same for every item in the `replication_specs` array. See [below](#auto_scaling) -* `analytics_auto_scaling` - (Optional) Configuration for the Collection of settings that configures analytics-auto-scaling information for the cluster. The values for the `analytics_auto_scaling` parameter must be the same for every item in the `replication_specs` array. See [below](#analytics_auto_scaling) +* `auto_scaling` - (Optional) Configuration for the Collection of settings that configures auto-scaling information for the cluster. The values for the `auto_scaling` parameter must be the same for all `region_configs` in all `replication_specs`. See [below](#auto_scaling) +* `analytics_auto_scaling` - (Optional) Configuration for the Collection of settings that configures analytics-auto-scaling information for the cluster. The values for the `analytics_auto_scaling` parameter must be the same for all `region_configs` in all `replication_specs`. See [below](#analytics_auto_scaling) * `backing_provider_name` - (Optional) Cloud service provider on which you provision the host for a multi-tenant cluster. Use this only when a `provider_name` is `TENANT` and `instance_size` of a specs is `M2` or `M5`. * `electable_specs` - (Optional) Hardware specifications for electable nodes in the region. Electable nodes can become the [primary](https://docs.atlas.mongodb.com/reference/glossary/#std-term-primary) and can enable local reads. If you do not specify this option, no electable nodes are deployed to the region. See [below](#specs) * `priority` - (Optional) Election priority of the region. For regions with only read-only nodes, set this value to 0. 
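The `zone_id` attribute described above is computed, so it can be referenced from other parts of a configuration once the cluster exists. A minimal sketch, assuming a cluster resource named `cluster` (the resource and output names are placeholders):

```terraform
# Expose the zone ID of the first replication spec, e.g. for use in
# Global Cluster backup policies or cloud backup schedule copy settings.
output "first_zone_id" {
  value = mongodbatlas_advanced_cluster.cluster.replication_specs.*.zone_id[0]
}
```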
@@ -558,30 +559,34 @@ If you are upgrading a replica set to a sharded cluster, you cannot increase the ### electable_specs -* `instance_size` - (Required) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size. -* `disk_iops` - (Optional) Target throughput (IOPS) desired for AWS storage attached to your cluster. Set only if you selected AWS as your cloud service provider. You can't set this parameter for a multi-cloud cluster. +* `instance_size` - (Required) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size. Electable nodes and read-only nodes (known as "base nodes") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards. +* `disk_iops` - (Optional) Target IOPS (Input/Output Operations Per Second) desired for storage attached to this hardware. Define this attribute only if you selected AWS as your cloud service provider, `instance_size` is set to "M30" or greater (not including "Mxx_NVME" tiers), and `ebs_volume_type` is "PROVISIONED". You can't set this attribute for a multi-cloud cluster. * `ebs_volume_type` - (Optional) Type of storage you want to attach to your AWS-provisioned cluster. Set only if you selected AWS as your cloud service provider. You can't set this parameter for a multi-cloud cluster. Valid values are: * `STANDARD` volume types can't exceed the default IOPS rate for the selected volume size. * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. * `node_count` - (Optional) Number of nodes of the given type for MongoDB Atlas to deploy to the region. +* `disk_size_gb` - (Optional) Storage capacity that the host's root volume possesses expressed in gigabytes. This value must be equal for all shards and node types. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using `disk_size_gb` with Standard IOPS could lead to errors and configuration issues. Therefore, it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the disk_size_gb parameter specifies the storage capacity, but the IOPS are set independently. Ensuring that `disk_size_gb` is used exclusively with Provisioned IOPS will help avoid these issues. + ### analytics_specs -* `disk_iops` - (Optional) Target throughput (IOPS) desired for AWS storage attached to your cluster. Set only if you selected AWS as your cloud service provider. You can't set this parameter for a multi-cloud cluster. +* `instance_size` - (Required) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. 
The instance size you select applies to all the data-bearing hosts in your instance size. Electable nodes and read-only nodes (known as "base nodes") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards. +* `disk_iops` - (Optional) Target IOPS (Input/Output Operations Per Second) desired for storage attached to this hardware. Define this attribute only if you selected AWS as your cloud service provider, `instance_size` is set to "M30" or greater (not including "Mxx_NVME" tiers), and `ebs_volume_type` is "PROVISIONED". You can't set this attribute for a multi-cloud cluster. * `ebs_volume_type` - (Optional) Type of storage you want to attach to your AWS-provisioned cluster. Set only if you selected AWS as your cloud service provider. You can't set this parameter for a multi-cloud cluster. Valid values are: * `STANDARD` volume types can't exceed the default IOPS rate for the selected volume size. * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. -* `instance_size` - (Optional) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size. * `node_count` - (Optional) Number of nodes of the given type for MongoDB Atlas to deploy to the region. +* `disk_size_gb` - (Optional) Storage capacity that the host's root volume possesses expressed in gigabytes. This value must be equal for all shards and node types. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using `disk_size_gb` with Standard IOPS could lead to errors and configuration issues. Therefore, it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the disk_size_gb parameter specifies the storage capacity, but the IOPS are set independently. Ensuring that `disk_size_gb` is used exclusively with Provisioned IOPS will help avoid these issues. ### read_only_specs -* `disk_iops` - (Optional) Target throughput (IOPS) desired for AWS storage attached to your cluster. Set only if you selected AWS as your cloud service provider. You can't set this parameter for a multi-cloud cluster. +* `instance_size` - (Required) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size. Electable nodes and read-only nodes (known as "base nodes") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards. +* `disk_iops` - (Optional) Target IOPS (Input/Output Operations Per Second) desired for storage attached to this hardware. 
Define this attribute only if you selected AWS as your cloud service provider, `instance_size` is set to "M30" or greater (not including "Mxx_NVME" tiers), and `ebs_volume_type` is "PROVISIONED". You can't set this attribute for a multi-cloud cluster. This parameter defaults to the cluster tier's standard IOPS value. * `ebs_volume_type` - (Optional) Type of storage you want to attach to your AWS-provisioned cluster. Set only if you selected AWS as your cloud service provider. You can't set this parameter for a multi-cloud cluster. Valid values are: * `STANDARD` volume types can't exceed the default IOPS rate for the selected volume size. * `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. -* `instance_size` - (Optional) Hardware specification for the instance sizes in this region. Each instance size has a default storage and memory capacity. The instance size you select applies to all the data-bearing hosts in your instance size. * `node_count` - (Optional) Number of nodes of the given type for MongoDB Atlas to deploy to the region. +* `disk_size_gb` - (Optional) Storage capacity that the host's root volume possesses expressed in gigabytes. This value must be equal for all shards and node types. If disk size specified is below the minimum (10 GB), this parameter defaults to the minimum disk size value. Storage charge calculations depend on whether you choose the default value or a custom value. The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier. **Note:** Using `disk_size_gb` with Standard IOPS could lead to errors and configuration issues. Therefore, it should be used only with the [Provisioned IOPS volume type](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster#PROVISIONED). When using Provisioned IOPS, the disk_size_gb parameter specifies the storage capacity, but the IOPS are set independently. Ensuring that `disk_size_gb` is used exclusively with Provisioned IOPS will help avoid these issues. ### auto_scaling @@ -589,19 +594,19 @@ If you are upgrading a replica set to a sharded cluster, you cannot increase the - Set to `true` to enable disk auto-scaling. - Set to `false` to disable disk auto-scaling. -~> **IMPORTANT:** If `disk_gb_enabled` is true, then Atlas will automatically scale disk size up and down. -This will cause the value of `disk_size_gb` returned to potentially be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster disk size back to the original `disk_size_gb` value. -To prevent this a lifecycle customization should be used, i.e.: +~> **IMPORTANT:** If `disk_gb_enabled` is true, Atlas automatically scales the cluster's disk size up or down. +This will cause the value of `replication_specs.#.region_config.#.(analytics_specs|electable_specs|read_only_specs).disk_size_gb` returned to potentially differ from what is specified in the Terraform config, and if you then apply a plan without noting this, Terraform will scale the cluster disk size back to the original `disk_size_gb` value. 
+To prevent disk scaling, use a lifecycle customization: `lifecycle { - ignore_changes = [disk_size_gb] + ignore_changes = [replication_specs.#.region_config.#.electable_specs.disk_size_gb] }` -After adding the `lifecycle` block to explicitly change `disk_size_gb` comment out the `lifecycle` block and run `terraform apply`. Please be sure to uncomment the `lifecycle` block once done to prevent any accidental changes. +After adding the `lifecycle` block, to explicitly change `replication_specs.#.region_config.#.(analytics_specs|electable_specs|read_only_specs).disk_size_gb`, comment out the `lifecycle` block and run `terraform apply`. Please be sure to uncomment the `lifecycle` block once done to prevent any accidental changes. ```terraform // Example: ignore disk_size_gb and instance_size changes in a replica set lifecycle { ignore_changes = [ - disk_size_gb, + replication_specs[0].region_configs[0].electable_specs[0].disk_size_gb, replication_specs[0].region_configs[0].electable_specs[0].instance_size, replication_specs[0].region_configs[1].electable_specs[0].instance_size, replication_specs[0].region_configs[2].electable_specs[0].instance_size, @@ -611,9 +616,9 @@ lifecycle { * `compute_enabled` - (Optional) Flag that indicates whether instance size auto-scaling is enabled. This parameter defaults to false. -~> **IMPORTANT:** If `compute_enabled` is true, then Atlas will automatically scale up to the maximum provided and down to the minimum, if provided. -This will cause the value of `instance_size` returned to potentially be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster back to the original `instance_size` value. -To prevent this a lifecycle customization should be used, i.e.: +~> **IMPORTANT:** If `compute_enabled` is true, Atlas automatically scales the cluster up to the maximum provided and down to the minimum, if provided. +This will cause the value of `instance_size` returned to potentially be different than what is specified in the Terraform config, and if you then apply a plan without noting this, Terraform will scale the cluster back to the original `instance_size` value. +To prevent compute scaling, use a lifecycle customization, as in the following example: `lifecycle { ignore_changes = [instance_size] }` @@ -629,8 +634,8 @@ After adding the `lifecycle` block to explicitly change `instance_size` comment * `compute_enabled` - (Optional) Flag that indicates whether instance size auto-scaling is enabled. This parameter defaults to false. ~> **IMPORTANT:** If `compute_enabled` is true, then Atlas will automatically scale up to the maximum provided and down to the minimum, if provided. -This will cause the value of `instance_size` returned to potential be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster back down to the original `instance_size` value. -To prevent this a lifecycle customization should be used, i.e.: +This will cause the value of `instance_size` returned to potentially be different than what is specified in the Terraform config, and if you then apply a plan without noting this, Terraform will scale the cluster back down to the original `instance_size` value. 
+To prevent compute scaling, use a lifecycle customization, as in the following example: `lifecycle { ignore_changes = [instance_size] }` @@ -674,8 +679,7 @@ In addition to all arguments above, the following attributes are exported: - DELETING - DELETED - REPAIRING -* `replication_specs` - Set of replication specifications for the cluster. Primary usage is covered under the [replication_specs argument reference](#replication_specs), though there are some computed attributes: - - `replication_specs.#.container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the id of the container created when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`. Example `AWS:US_EAST_1" = "61e0797dde08fb498ca11a71`. +* `replication_specs.#.container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. The Container ID is the id of the container created when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`. Example `AWS:US_EAST_1" = "61e0797dde08fb498ca11a71`. ## Import @@ -686,3 +690,7 @@ $ terraform import mongodbatlas_advanced_cluster.my_cluster 1112222b3bf99403840e ``` See detailed information for arguments and attributes: [MongoDB API Advanced Clusters](https://docs.atlas.mongodb.com/reference/api/cluster-advanced/create-one-cluster-advanced/) + +~> **IMPORTANT:** +
• When a cluster is imported, the resulting schema structure will always follow the new sharding schema, returning one `replication_specs` object per individual shard of the cluster. +
• Note: The first time `terraform apply` command is run **after** updating the configuration of an imported cluster, you may receive a `500 Internal Server Error (Error code: "SERVICE_UNAVAILABLE")` error. This is a known temporary issue. If you encounter this, please re-run `terraform apply` and this time the update should succeed. diff --git a/docs/resources/cloud_backup_schedule.md b/docs/resources/cloud_backup_schedule.md index 4643b4c524..8ca89135f8 100644 --- a/docs/resources/cloud_backup_schedule.md +++ b/docs/resources/cloud_backup_schedule.md @@ -204,7 +204,7 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { "YEARLY", "ON_DEMAND"] region_name = "US_EAST_1" - replication_spec_id = mongodbatlas_advanced_cluster.my_cluster.replication_specs.*.id[0] + zone_id = mongodbatlas_advanced_cluster.my_cluster.replication_specs.*.zone_id[0] should_copy_oplogs = false } @@ -221,41 +221,43 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { **Note** This parameter does not return updates on return from API, this is a feature of the MongoDB Atlas Admin API itself and not Terraform. For more details about this resource see [Cloud Backup Schedule](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Cloud-Backups/operation/getBackupSchedule). -* `policy_item_hourly` - (Optional) Hourly policy item -* `policy_item_daily` - (Optional) Daily policy item -* `policy_item_weekly` - (Optional) Weekly policy item -* `policy_item_monthly` - (Optional) Monthly policy item -* `policy_item_yearly` - (Optional) Yearly policy item +* `policy_item_hourly` - (Optional) Hourly policy item. See [below](#policy_item_hourly) +* `policy_item_daily` - (Optional) Daily policy item. See [below](#policy_item_daily) +* `policy_item_weekly` - (Optional) Weekly policy item. See [below](#policy_item_weekly) +* `policy_item_monthly` - (Optional) Monthly policy item. See [below](#policy_item_monthly) +* `policy_item_yearly` - (Optional) Yearly policy item. See [below](#policy_item_yearly) * `auto_export_enabled` - Flag that indicates whether automatic export of cloud backup snapshots to the AWS bucket is enabled. Value can be one of the following: * true - enables automatic export of cloud backup snapshots to the AWS bucket * false - disables automatic export of cloud backup snapshots to the AWS bucket (default) * `use_org_and_group_names_in_export_prefix` - Specify true to use organization and project names instead of organization and project UUIDs in the path for the metadata files that Atlas uploads to your S3 bucket after it finishes exporting the snapshots. To learn more about the metadata files that Atlas uploads, see [Export Cloud Backup Snapshot](https://www.mongodb.com/docs/atlas/backup/cloud-backup/export/#std-label-cloud-provider-snapshot-export). -### Export +* `copy_settings` - List that contains a document for each copy setting item in the desired backup policy. See [below](#copy_settings) +* `export` - Policy for automatically exporting Cloud Backup Snapshots. See [below](#export) +### export * `export_bucket_id` - Unique identifier of the mongodbatlas_cloud_backup_snapshot_export_bucket export_bucket_id value. * `frequency_type` - Frequency associated with the export snapshot item. -### Policy Item Hourly +### policy_item_hourly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For hourly policies, the frequency type is defined as `hourly`. 
Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (hourly in this case). The supported values for hourly policies are `1`, `2`, `4`, `6`, `8` or `12` hours. Note that `12` hours is the only accepted value for NVMe clusters. * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. -### Policy Item Daily +### policy_item_daily * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For daily policies, the frequency type is defined as `daily`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (daily in this case). The only supported value for daily policies is `1` day. * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Note that for less frequent policy items, Atlas requires that you specify a retention period greater than or equal to the retention period specified for more frequent policy items. For example: If the hourly policy item specifies a retention of two days, the daily retention policy must specify two days or greater. -### Policy Item Weekly +### policy_item_weekly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For weekly policies, the frequency type is defined as `weekly`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (weekly in this case). The supported values for weekly policies are `1` through `7`, where `1` represents Monday and `7` represents Sunday. * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Weekly policy must have retention of at least 7 days or 1 week. Note that for less frequent policy items, Atlas requires that you specify a retention period greater than or equal to the retention period specified for more frequent policy items. For example: If the daily policy item specifies a retention of two weeks, the weekly retention policy must specify two weeks or greater. -### Policy Item Monthly +### policy_item_monthly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For monthly policies, the frequency type is defined as `monthly`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (monthly in this case). The supported values for weekly policies are @@ -264,7 +266,7 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Monthly policy must have retention days of at least 31 days or 5 weeks or 1 month. 
Note that for less frequent policy items, Atlas requires that you specify a retention period greater than or equal to the retention period specified for more frequent policy items. For example: If the weekly policy item specifies a retention of two weeks, the monthly retention policy must specify two weeks or greater. -### Policy Item Yearly +### policy_item_yearly * `id` - Unique identifier of the backup policy item. * `frequency_type` - Frequency associated with the backup policy item. For yearly policies, the frequency type is defined as `yearly`. Note that this is a read-only value and not required in plan files - its value is implied from the policy resource type. * `frequency_interval` - Desired frequency of the new backup policy item specified by `frequency_type` (yearly in this case). The supported values for yearly policies are @@ -272,12 +274,12 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { * `retention_unit` - Scope of the backup policy item: `days`, `weeks`, `months`, or `years`. * `retention_value` - Value to associate with `retention_unit`. Yearly policy must have retention of at least 1 year. -### Snapshot Distribution -* +### copy_settings * `cloud_provider` - (Required) Human-readable label that identifies the cloud provider that stores the snapshot copy. i.e. "AWS" "AZURE" "GCP" * `frequencies` - (Required) List that describes which types of snapshots to copy. i.e. "HOURLY" "DAILY" "WEEKLY" "MONTHLY" "ON_DEMAND" * `region_name` - (Required) Target region to copy snapshots belonging to replicationSpecId to. Please supply the 'Atlas Region' which can be found under https://www.mongodb.com/docs/atlas/reference/cloud-providers/ 'regions' link -* `replication_spec_id` -(Required) Unique 24-hexadecimal digit string that identifies the replication object for a zone in a cluster. For global clusters, there can be multiple zones to choose from. For sharded clusters and replica set clusters, there is only one zone in the cluster. To find the Replication Spec Id, consult the replicationSpecs array returned from [Return One Multi-Cloud Cluster in One Project](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Clusters/operation/getCluster). +* `zone_id` - Unique 24-hexadecimal digit string that identifies the zone in a cluster. For global clusters, there can be multiple zones to choose from. For sharded clusters and replica set clusters, there is only one zone in the cluster. To find the appropriate value for `zone_id`, send a GET request to [Return One Cluster From One Project](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Clusters/operation/getCluster) and consult the returned replicationSpecs array. Alternatively, use the `mongodbatlas_advanced_cluster` data source or resource and reference `replication_specs.#.zone_id`. +* `replication_spec_id` - Unique 24-hexadecimal digit string that identifies the replication object for a zone in a cluster. For global clusters, there can be multiple zones to choose from. For sharded clusters and replica set clusters, there is only one zone in the cluster. To find the Replication Spec Id, consult the replicationSpecs array returned from [Return One Multi-Cloud Cluster in One Project](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Clusters/operation/getCluster). **(DEPRECATED)** Use `zone_id` instead. To learn more, see the [1.18.0 upgrade guide](../guides/1.18.0-upgrade-guide.md#transition-cloud-backup-schedules-for-clusters-to-use-zones). 
* `should_copy_oplogs` - (Required) Flag that indicates whether to copy the oplogs to the target region. You can use the oplogs to perform point-in-time restores. ## Attributes Reference diff --git a/docs/resources/cloud_provider_access.md b/docs/resources/cloud_provider_access.md index 331f250bb8..e1d77a6dcd 100644 --- a/docs/resources/cloud_provider_access.md +++ b/docs/resources/cloud_provider_access.md @@ -6,7 +6,7 @@ The Terraform MongoDB Atlas Provider offers the following path to perform an aut the initial configuration (create, delete operations). The second resource, `mongodbatlas_cloud_provider_access_authorization`, helps to perform the authorization using the role_id of the first resource. This path is helpful in a multi-provider Terraform file, and allows for a single and decoupled apply. See example of this Two Resource path option with AWS Cloud [here](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/master/examples/mongodbatlas_cloud_provider_access/aws) and AZURE Cloud [here](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/master/examples/mongodbatlas_cloud_provider_access/azure). --> **IMPORTANT** If you want to move from the single resource path to the two resources path see the [migration guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/0.9.1-upgrade-guide#migration-to-cloud-provider-access-setup) +-> **IMPORTANT** If you want to move from the single resource path to the two resources path, see the [Migration Guide](../guides/0.9.1-upgrade-guide#migration-to-cloud-provider-access-setup) ## mongodbatlas_cloud_provider_access_setup @@ -147,4 +147,4 @@ Conditional ## Import mongodbatlas_cloud_provider_access_authorization -The Cloud Provider Access Authorization resource cannot be imported. \ No newline at end of file +You can't import the Cloud Provider Access Authorization resource. diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index 9faad9b92f..9e39f404b3 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -2,6 +2,9 @@ `mongodbatlas_cluster` provides a Cluster resource. The resource lets you create, edit and delete clusters. The resource requires your Project ID. +~> **IMPORTANT:** +
• New Users: If you are not already using `mongodbatlas_cluster` for your deployment, we recommend starting with the [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster). `mongodbatlas_advanced_cluster` has the same functionality as `mongodbatlas_cluster` but also supports multi-cloud clusters. + -> **NOTE:** Groups and projects are synonymous terms. You may find group_id in the official documentation. -> **NOTE:** A network container is created for a cluster to reside in. To use this container with another resource, such as peering, reference the computed`container_id` attribute on the cluster. @@ -13,7 +16,6 @@ -> **NOTE:** The Low-CPU instance clusters are prefixed with `R`, i.e. `R40`. For complete list of Low-CPU instance clusters see Cluster Configuration Options under each Cloud Provider (https://www.mongodb.com/docs/atlas/reference/cloud-providers/). ~> **IMPORTANT:** -
• New Users: If you are not already using `mongodbatlas_cluster` for your deployment we recommend starting with the [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster). `mongodbatlas_advanced_cluster` has all the same functionality as `mongodbatlas_cluster` but also supports multi-cloud clusters.
• Multi Region Cluster: The `mongodbatlas_cluster` resource doesn't return the `container_id` for each region utilized by the cluster. To retrieve the `container_id`, we recommend using the [`mongodbatlas_advanced_cluster`](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/advanced_cluster) resource instead.
• Free tier cluster creation (M0) is supported.
• Shared tier clusters (M0, M2, M5) can be upgraded to dedicated tiers (M10+) via this provider. WARNING WHEN UPGRADING TENANT/SHARED CLUSTERS!!! Any change from shared tier to a different instance size will be considered a tenant upgrade. When upgrading from shared tier to dedicated simply change the `provider_name` from "TENANT" to your preferred provider (AWS, GCP, AZURE) and remove the variable `backing_provider_name`, for example if you have an existing tenant/shared cluster and want to upgrade your Terraform config should be changed from: @@ -618,4 +620,4 @@ Clusters can be imported using project ID and cluster name, in the format `PROJE $ terraform import mongodbatlas_cluster.my_cluster 1112222b3bf99403840e8934-Cluster0 ``` -See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) \ No newline at end of file +See detailed information for arguments and attributes: [MongoDB API Clusters](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) diff --git a/docs/resources/encryption_at_rest.md b/docs/resources/encryption_at_rest.md index ea85a74fa2..5ac02ddbe4 100644 --- a/docs/resources/encryption_at_rest.md +++ b/docs/resources/encryption_at_rest.md @@ -6,6 +6,8 @@ [Azure Key Vault](https://docs.atlas.mongodb.com/security-azure-kms/#security-azure-kms) [Google Cloud KMS](https://docs.atlas.mongodb.com/security-gcp-kms/#security-gcp-kms) +The [encryption at rest Terraform module](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/encryption-at-rest/mongodbatlas/latest) makes use of this resource and simplifies its use. + After configuring at least one Encryption at Rest provider for the Atlas project, Project Owners can enable Encryption at Rest for each Atlas cluster for which they require encryption. The Encryption at Rest provider does not have to match the cluster cloud service provider. Atlas does not automatically rotate user-managed encryption keys. Defer to your preferred Encryption at Rest provider’s documentation and guidance for best practices on key rotation. Atlas automatically creates a 90-day key rotation alert when you configure Encryption at Rest using your Key Management in an Atlas project. 
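For example, once a key management provider has been configured for the project, Encryption at Rest can be enabled on a cluster through its `encryption_at_rest_provider` argument. A minimal sketch, assuming the project is already configured for AWS key management and using placeholder names:

```terraform
resource "mongodbatlas_advanced_cluster" "encrypted" {
  project_id   = var.project_id # project already configured for Encryption at Rest (placeholder)
  name         = "encrypted-cluster"
  cluster_type = "REPLICASET"

  # Requires a dedicated tier (M10 or greater).
  encryption_at_rest_provider = "AWS"

  replication_specs {
    region_configs {
      electable_specs {
        instance_size = "M10"
        node_count    = 3
      }
      provider_name = "AWS"
      priority      = 7
      region_name   = "US_EAST_1"
    }
  }
}
```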
diff --git a/docs/resources/global_cluster_config.md b/docs/resources/global_cluster_config.md index 313e5943e4..f672120dcf 100644 --- a/docs/resources/global_cluster_config.md +++ b/docs/resources/global_cluster_config.md @@ -13,56 +13,58 @@ ### Example Global cluster ```terraform - resource "mongodbatlas_cluster" "test" { - project_id = "" - name = "" - cloud_backup = true - cluster_type = "GEOSHARDED" - - //Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M30" - - replication_specs { - zone_name = "Zone 1" - num_shards = 1 - regions_config { - region_name = "EU_CENTRAL_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - - replication_specs { - zone_name = "Zone 2" - num_shards = 1 - regions_config { - region_name = "US_EAST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } +resource "mongodbatlas_advanced_cluster" "test" { + project_id = "" + name = "" + cluster_type = "GEOSHARDED" + backup_enabled = true + + replication_specs { + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_CENTRAL_1" + } + } + + replication_specs { + zone_name = "Zone 2" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_2" + } + } +} + +resource "mongodbatlas_global_cluster_config" "config" { + project_id = mongodbatlas_advanced_cluster.test.project_id + cluster_name = mongodbatlas_advanced_cluster.test.name + + managed_namespaces { + db = "mydata" + collection = "publishers" + custom_shard_key = "city" + is_custom_shard_key_hashed = false + is_shard_key_unique = false } - resource "mongodbatlas_global_cluster_config" "config" { - project_id = mongodbatlas_cluster.test.project_id - cluster_name = mongodbatlas_cluster.test.name - - managed_namespaces { - db = "mydata" - collection = "publishers" - custom_shard_key = "city" - is_custom_shard_key_hashed = false - is_shard_key_unique = false - } - - custom_zone_mappings { - location ="CA" - zone = "Zone 1" - } + custom_zone_mappings { + location ="CA" + zone = "Zone 1" } +} ``` ## Argument Reference diff --git a/docs/resources/network_container.md b/docs/resources/network_container.md index f43e35c89b..bfe429aa9a 100644 --- a/docs/resources/network_container.md +++ b/docs/resources/network_container.md @@ -100,5 +100,3 @@ $ terraform import mongodbatlas_network_container.my_container 1112222b3bf994038 ``` See detailed information for arguments and attributes: [MongoDB API Network Peering Container](https://docs.atlas.mongodb.com/reference/api/vpc-create-container/) - --> **NOTE:** If you need to get an existing container ID see the [How-To Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). 
\ No newline at end of file diff --git a/docs/resources/network_peering.md b/docs/resources/network_peering.md index cc2b640751..901074cc03 100644 --- a/docs/resources/network_peering.md +++ b/docs/resources/network_peering.md @@ -404,5 +404,3 @@ atlas projects list atlas networking peering list --projectId --provider ``` See detailed information for arguments and attributes: [MongoDB API Network Peering Connection](https://docs.atlas.mongodb.com/reference/api/vpc-create-peering-connection/) - --> **NOTE:** If you need to get an existing container ID see the [How-To Guide](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/howto-guide.html). diff --git a/docs/resources/private_endpoint_regional_mode.md b/docs/resources/private_endpoint_regional_mode.md index b4d2ae9ea1..ee02202126 100644 --- a/docs/resources/private_endpoint_regional_mode.md +++ b/docs/resources/private_endpoint_regional_mode.md @@ -18,33 +18,59 @@ resource "mongodbatlas_private_endpoint_regional_mode" "test" { enabled = true } -resource "mongodbatlas_cluster" "cluster-atlas" { - project_id = var.atlasprojectid - name = var.cluster_name - cloud_backup = true - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "5.0" - cluster_type = "GEOSHARDED" - replication_specs { - zone_name = "Zone 1" - num_shards = 2 - regions_config { - region_name = var.atlas_region_east - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 +resource "mongodbatlas_advanced_cluster" "cluster_atlas" { + project_id = var.atlasprojectid + name = var.cluster_name + cluster_type = "GEOSHARDED" + backup_enabled = true + + replication_specs { # Shard 1 + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = var.atlas_region_east } - regions_config { - region_name = var.atlas_region_west - electable_nodes = 2 - priority = 6 - read_only_nodes = 0 + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 2 + } + provider_name = "AWS" + priority = 6 + region_name = var.atlas_region_west } } - # Provider settings - provider_name = "AWS" - provider_instance_size_name = "M30" + replication_specs { # Shard 2 + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = var.atlas_region_east + } + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 2 + } + provider_name = "AWS" + priority = 6 + region_name = var.atlas_region_west + } + } depends_on = [ mongodbatlas_privatelink_endpoint_service.test_west, @@ -78,7 +104,7 @@ resource "aws_vpc_endpoint" "test_west" { resource "mongodbatlas_privatelink_endpoint" "test_east" { project_id = "var.atlasprojectid provider_name = "AWS" - region = "US_WEST_1" + region = "US_EAST_1" } resource "mongodbatlas_privatelink_endpoint_service" "test_east" { @@ -111,8 +137,8 @@ You can create only sharded clusters when you enable the regionalized private en ## Additional Reference In addition to the example shown above, keep in mind: -* `mongodbatlas_cluster.cluster-atlas.depends_on` - Make your cluster dependent on the project's `mongodbatlas_private_endpoint_regional_mode` as well as any relevant `mongodbatlas_privatelink_endpoint_service` resources. See an [example](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/master/examples/aws-privatelink-endpoint/cluster-geosharded). 
-* `mongodbatlas_cluster.cluster-atlas.connection_strings` will differ based on the value of `mongodbatlas_private_endpoint_regional_mode.test.enabled`. +* `mongodbatlas_advanced_cluster.cluster_atlas.depends_on` - Make your cluster dependent on the project's `mongodbatlas_private_endpoint_regional_mode` as well as any relevant `mongodbatlas_privatelink_endpoint_service` resources. See an [example](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/master/examples/aws-privatelink-endpoint/cluster-geosharded). +* `mongodbatlas_advanced_cluster.cluster_atlas.connection_strings` will differ based on the value of `mongodbatlas_private_endpoint_regional_mode.test.enabled`. * For more information on usage with GCP, see [our Privatelink Endpoint Service documentation: Example with GCP](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/privatelink_endpoint_service#example-with-gcp) * For more information on usage with Azure, see [our Privatelink Endpoint Service documentation: Examples with Azure](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/privatelink_endpoint_service#example-with-azure) @@ -123,4 +149,4 @@ Private Endpoint Regional Mode can be imported using project id in format `{proj $ terraform import mongodbatlas_private_endpoint_regional_mode.test 1112222b3bf99403840e8934 ``` -See detailed information for arguments and attributes: **Private Endpoints** [Get Regional Mode](https://www.mongodb.com/docs/atlas/reference/api/private-endpoints-get-regional-mode/) | [Update Regional Mode](https://www.mongodb.com/docs/atlas/reference/api/private-endpoints-update-regional-mode/) \ No newline at end of file +See detailed information for arguments and attributes: **Private Endpoints** [Get Regional Mode](https://www.mongodb.com/docs/atlas/reference/api/private-endpoints-get-regional-mode/) | [Update Regional Mode](https://www.mongodb.com/docs/atlas/reference/api/private-endpoints-update-regional-mode/) diff --git a/docs/resources/privatelink_endpoint.md b/docs/resources/privatelink_endpoint.md index 0b4eb81665..2753926b24 100644 --- a/docs/resources/privatelink_endpoint.md +++ b/docs/resources/privatelink_endpoint.md @@ -2,6 +2,8 @@ `mongodbatlas_privatelink_endpoint` provides a Private Endpoint resource. This represents a [Private Endpoint Service](https://www.mongodb.com/docs/atlas/security-private-endpoint/#private-endpoint-concepts) that can be created in an Atlas project. +The [private link Terraform module](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/private-endpoint/mongodbatlas/latest) makes use of this resource and simplifies its use. 
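For orientation, the resource itself only needs the project, provider, and region; the shape below mirrors the examples elsewhere in these docs and uses placeholder values:

```terraform
resource "mongodbatlas_privatelink_endpoint" "this" {
  project_id    = var.project_id # placeholder project reference
  provider_name = "AWS"
  region        = "US_EAST_1"
}
```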
+ ~> **IMPORTANT:**You must have one of the following roles to successfully handle the resource: * Organization Owner * Project Owner @@ -79,4 +81,4 @@ Private Endpoint Service can be imported using project ID, private link ID, prov $ terraform import mongodbatlas_privatelink_endpoint.test 1112222b3bf99403840e8934-3242342343112-AWS-us-east-1 ``` -See detailed information for arguments and attributes: [MongoDB API Private Endpoint Service](https://docs.atlas.mongodb.com/reference/api/private-endpoints-service-create-one//) \ No newline at end of file +See detailed information for arguments and attributes: [MongoDB API Private Endpoint Service](https://docs.atlas.mongodb.com/reference/api/private-endpoints-service-create-one/) diff --git a/docs/resources/privatelink_endpoint_service.md b/docs/resources/privatelink_endpoint_service.md index c0bf2b960c..1427810101 100644 --- a/docs/resources/privatelink_endpoint_service.md +++ b/docs/resources/privatelink_endpoint_service.md @@ -2,6 +2,8 @@ `mongodbatlas_privatelink_endpoint_service` provides a Private Endpoint Interface Link resource. This represents a Private Endpoint Interface Link, which adds one [Interface Endpoint](https://www.mongodb.com/docs/atlas/security-private-endpoint/#private-endpoint-concepts) to a private endpoint connection in an Atlas project. +The [private link Terraform module](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/private-endpoint/mongodbatlas/latest) makes use of this resource and simplifies its use. + ~> **IMPORTANT:**You must have one of the following roles to successfully handle the resource: * Organization Owner * Project Owner @@ -191,7 +193,6 @@ In addition to all arguments above, the following attributes are exported: * `endpoint_group_name` - (Optional) Unique identifier of the endpoint group. The endpoint group encompasses all of the endpoints that you created in GCP. * `endpoints` - Collection of individual private endpoints that comprise your network endpoint group. * `status` - Status of the endpoint. Atlas returns one of the [values shown above](https://docs.atlas.mongodb.com/reference/api/private-endpoints-endpoint-create-one/#std-label-ref-status-field). - * `service_attachment_name` - Unique alphanumeric and special character strings that identify the service attachment associated with the endpoint. ## Import Private Endpoint Link Connection can be imported using project ID and username, in the format `{project_id}--{private_link_id}--{endpoint_service_id}--{provider_name}`, e.g. diff --git a/docs/resources/push_based_log_export.md b/docs/resources/push_based_log_export.md index 5c2f5cb41a..a3ae1e863b 100644 --- a/docs/resources/push_based_log_export.md +++ b/docs/resources/push_based_log_export.md @@ -3,6 +3,8 @@ `mongodbatlas_push_based_log_export` provides a resource for push-based log export feature. The resource lets you configure, enable & disable the project level settings for the push-based log export feature. Using this resource you can continually push logs from mongod, mongos, and audit logs to an Amazon S3 bucket. Atlas exports logs every 5 minutes. +The [push based log export Terraform module](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/push-based-log-export/mongodbatlas/latest) makes use of this resource and simplifies its use. 
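Pending the full examples below, a rough sketch of a configuration might look like the following; the attribute names (`bucket_name`, `iam_role_id`, `prefix_path`) are assumptions here and should be checked against the resource's argument reference:

```terraform
resource "mongodbatlas_push_based_log_export" "this" {
  project_id  = var.project_id      # placeholder project reference
  bucket_name = "atlas-log-export"  # assumed attribute: target S3 bucket
  iam_role_id = var.log_export_role # assumed attribute: authorized AWS IAM role ID
  prefix_path = "atlas-logs"        # assumed attribute: key prefix inside the bucket
}
```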
+ ## Example Usages diff --git a/docs/resources/stream_connection.md b/docs/resources/stream_connection.md index 962ca1831f..9fd9d50454 100644 --- a/docs/resources/stream_connection.md +++ b/docs/resources/stream_connection.md @@ -96,7 +96,7 @@ If `type` is of value `Kafka` the following additional arguments are defined: ### DBRoleToExecute -* `role` - The name of the role to use. Can be a built in role or a custom role. +* `role` - The name of the role to use. Value can be `atlasAdmin`, `readWriteAnyDatabase`, or `readAnyDatabase` if `type` is set to `BUILT_IN`, or the name of a user-defined role if `type` is set to `CUSTOM`. * `type` - Type of the DB role. Can be either BUILT_IN or CUSTOM. ## Import diff --git a/docs/resources/third_party_integration.md b/docs/resources/third_party_integration.md index 6f0cff660e..4351375759 100644 --- a/docs/resources/third_party_integration.md +++ b/docs/resources/third_party_integration.md @@ -58,7 +58,6 @@ resource "mongodbatlas_third_party_integration" "test_datadog" { * `user_name` - Your Prometheus username. * `password` - Your Prometheus password. * `service_discovery` - Indicates which service discovery method is used, either file or http. - * `scheme` - Your Prometheus protocol scheme configured for requests. **Note:** This attribute is deprecated as it is not being used. * `enabled` - Whether your cluster has Prometheus enabled. ## Attributes Reference diff --git a/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/README.md b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/README.md new file mode 100644 index 0000000000..7565f15a5e --- /dev/null +++ b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/README.md @@ -0,0 +1,62 @@ +# MongoDB Atlas Provider -- Global Cluster +This example creates a project and a Sharded Cluster with 4 independent shards with varying cluster tiers. + + +## Dependencies + +* Terraform MongoDB Atlas Provider v1.18.0 +* A MongoDB Atlas account + +``` +Terraform >= 0.13 ++ provider registry.terraform.io/terraform-providers/mongodbatlas v1.18.0 +``` + + +## Usage +**1\. Ensure your MongoDB Atlas credentials are set up.** + +This can be done using environment variables: + +```bash +export MONGODB_ATLAS_PUBLIC_KEY="xxxx" +export MONGODB_ATLAS_PRIVATE_KEY="xxxx" +``` + +... or follow as in the `variables.tf` file and create **terraform.tfvars** file with all the variable values, ex: +``` +public_key = "" +private_key = "" +atlas_org_id = "" +``` + +... or use [AWS Secrets Manager](https://github.com/mongodb/terraform-provider-mongodbatlas/blob/master/website/docs/index.html.markdown#aws-secrets-manager) + +**2\. Review the Terraform plan.** + +Execute the below command and ensure you are happy with the plan. + +``` bash +$ terraform plan +``` +This project currently supports the below deployments: + +- An Atlas Project +- A Sharded Cluster with independent shards with varying cluster tiers + +**3\. Execute the Terraform apply.** + +Now execute the plan to provision the Atlas Project and Cluster resources. + +``` bash +$ terraform apply +``` + +**4\. Destroy the resources.** + +Once you are finished your testing, ensure you destroy the resources to avoid unnecessary Atlas charges. 
+ +``` bash +$ terraform destroy +``` + diff --git a/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/main.tf b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/main.tf new file mode 100644 index 0000000000..3448b2d688 --- /dev/null +++ b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/main.tf @@ -0,0 +1,79 @@ +provider "mongodbatlas" { + public_key = var.public_key + private_key = var.private_key +} + +resource "mongodbatlas_advanced_cluster" "cluster" { + project_id = mongodbatlas_project.project.id + name = var.cluster_name + cluster_type = "SHARDED" + backup_enabled = true + + replication_specs { # shard 1 - M30 instance size + region_configs { + electable_specs { + instance_size = "M30" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # shard 2 - M20 instance size + region_configs { + electable_specs { + instance_size = "M20" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # shard 3 - M10 instance size + region_configs { + electable_specs { + instance_size = "M10" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + replication_specs { # shard 4 - M10 instance size + region_configs { + electable_specs { + instance_size = "M10" + disk_iops = 3000 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + advanced_configuration { + javascript_enabled = true + oplog_size_mb = 999 + sample_refresh_interval_bi_connector = 300 + } + + tags { + key = "environment" + value = "dev" + } +} + +resource "mongodbatlas_project" "project" { + name = "Asymmetric Sharded Cluster" + org_id = var.atlas_org_id +} diff --git a/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/variables.tf b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/variables.tf new file mode 100644 index 0000000000..05e875b6b0 --- /dev/null +++ b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/variables.tf @@ -0,0 +1,17 @@ +variable "atlas_org_id" { + description = "Atlas organization id" + type = string +} +variable "public_key" { + description = "Public API key to authenticate to Atlas" + type = string +} +variable "private_key" { + description = "Private API key to authenticate to Atlas" + type = string +} +variable "cluster_name" { + description = "Atlas cluster name" + type = string + default = "AsymmetricShardedCluster" +} diff --git a/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/versions.tf b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/versions.tf new file mode 100644 index 0000000000..9b4be6c14c --- /dev/null +++ b/examples/mongodbatlas_advanced_cluster/asymmetric-sharded-cluster/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + mongodbatlas = { + source = "mongodb/mongodbatlas" + version = "~> 1.18" + } + } + required_version = ">= 1.0" +} diff --git a/examples/mongodbatlas_advanced_cluster/global-cluster/README.md b/examples/mongodbatlas_advanced_cluster/global-cluster/README.md index 428821a1fe..80f023f2e0 100644 --- a/examples/mongodbatlas_advanced_cluster/global-cluster/README.md +++ b/examples/mongodbatlas_advanced_cluster/global-cluster/README.md @@ -1,5 +1,5 @@ # MongoDB Atlas Provider -- Global Cluster -This example creates a project and a Global Cluster. 
+This example creates a project and a Global Cluster with 2 zones where each zone has two shards. ## Dependencies @@ -46,7 +46,7 @@ This project currently supports the below deployments: **3\. Execute the Terraform apply.** -Now execute the plan to provision the Federated settings resources. +Now execute the plan to provision the Atlas Project and Cluster resources. ``` bash $ terraform apply diff --git a/examples/mongodbatlas_advanced_cluster/global-cluster/main.tf b/examples/mongodbatlas_advanced_cluster/global-cluster/main.tf index b554cde198..a691d60d50 100644 --- a/examples/mongodbatlas_advanced_cluster/global-cluster/main.tf +++ b/examples/mongodbatlas_advanced_cluster/global-cluster/main.tf @@ -13,79 +13,95 @@ resource "mongodbatlas_advanced_cluster" "cluster" { backup_enabled = true - replication_specs { # zone n1 - zone_name = "zone n1" - num_shards = 3 # 3-shard Multi-Cloud Cluster + replication_specs { # shard 1 - zone n1 + zone_name = "zone n1" - region_configs { # shard n1 + region_configs { electable_specs { instance_size = "M10" node_count = 3 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AWS" priority = 7 region_name = "US_EAST_1" } - region_configs { # shard n2 + region_configs { electable_specs { instance_size = "M10" node_count = 2 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AZURE" priority = 6 region_name = "US_EAST_2" } + } - region_configs { # shard n3 + replication_specs { # shard 2 - zone n1 + zone_name = "zone n1" + + region_configs { electable_specs { instance_size = "M10" - node_count = 2 + node_count = 3 } - analytics_specs { + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + + region_configs { + electable_specs { instance_size = "M10" - node_count = 1 + node_count = 2 } - provider_name = "GCP" - priority = 0 - region_name = "US_EAST_4" + provider_name = "AZURE" + priority = 6 + region_name = "US_EAST_2" } } - replication_specs { # zone n2 - zone_name = "zone n2" - num_shards = 2 # 2-shard Multi-Cloud Cluster + replication_specs { # shard 1 - zone n2 + zone_name = "zone n2" - region_configs { # shard n1 + region_configs { electable_specs { instance_size = "M10" node_count = 3 } - analytics_specs { - instance_size = "M10" - node_count = 1 - } provider_name = "AWS" priority = 7 region_name = "EU_WEST_1" } - region_configs { # shard n2 + region_configs { electable_specs { instance_size = "M10" node_count = 2 } - analytics_specs { + provider_name = "AZURE" + priority = 6 + region_name = "EUROPE_NORTH" + } + } + + replication_specs { # shard 2 - zone n2 + zone_name = "zone n2" + + region_configs { + electable_specs { instance_size = "M10" - node_count = 1 + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + + region_configs { + electable_specs { + instance_size = "M10" + node_count = 2 } provider_name = "AZURE" priority = 6 diff --git a/examples/mongodbatlas_advanced_cluster/global-cluster/versions.tf b/examples/mongodbatlas_advanced_cluster/global-cluster/versions.tf index a8f13589d1..9b4be6c14c 100644 --- a/examples/mongodbatlas_advanced_cluster/global-cluster/versions.tf +++ b/examples/mongodbatlas_advanced_cluster/global-cluster/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { mongodbatlas = { source = "mongodb/mongodbatlas" - version = "~> 1.10.0" + version = "~> 1.18" } } required_version = ">= 1.0" diff --git a/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md 
b/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md index 4e356cb8c9..40f455fe0b 100644 --- a/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md +++ b/examples/mongodbatlas_advanced_cluster/multi-cloud/README.md @@ -1,5 +1,5 @@ # MongoDB Atlas Provider -- Multi-Cloud Advanced Cluster -This example creates a project and a Multi Cloud Advanced Cluster in all the available cloud providers. +This example creates a project and a Multi Cloud Advanced Cluster with 2 shards. ## Dependencies @@ -46,7 +46,7 @@ This project currently supports the below deployments: **3\. Execute the Terraform apply.** -Now execute the plan to provision the Federated settings resources. +Now execute the plan to provision the Atlas Project and Cluster resources. ``` bash $ terraform apply diff --git a/examples/mongodbatlas_advanced_cluster/multi-cloud/main.tf b/examples/mongodbatlas_advanced_cluster/multi-cloud/main.tf index b2e2136b35..4834de43ca 100644 --- a/examples/mongodbatlas_advanced_cluster/multi-cloud/main.tf +++ b/examples/mongodbatlas_advanced_cluster/multi-cloud/main.tf @@ -9,10 +9,8 @@ resource "mongodbatlas_advanced_cluster" "cluster" { cluster_type = "SHARDED" backup_enabled = true - replication_specs { - num_shards = 3 # 3-shard Multi-Cloud Cluster - - region_configs { # shard n1 + replication_specs { # shard 1 + region_configs { electable_specs { instance_size = "M10" node_count = 3 @@ -26,7 +24,7 @@ resource "mongodbatlas_advanced_cluster" "cluster" { region_name = "US_EAST_1" } - region_configs { # shard n2 + region_configs { electable_specs { instance_size = "M10" node_count = 2 @@ -39,8 +37,24 @@ resource "mongodbatlas_advanced_cluster" "cluster" { priority = 6 region_name = "US_EAST_2" } + } + + replication_specs { # shard 2 + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + analytics_specs { + instance_size = "M10" + node_count = 1 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } - region_configs { # shard n3 + region_configs { electable_specs { instance_size = "M10" node_count = 2 @@ -49,9 +63,9 @@ resource "mongodbatlas_advanced_cluster" "cluster" { instance_size = "M10" node_count = 1 } - provider_name = "GCP" - priority = 5 - region_name = "US_EAST_4" + provider_name = "AZURE" + priority = 6 + region_name = "US_EAST_2" } } diff --git a/examples/mongodbatlas_advanced_cluster/multi-cloud/versions.tf b/examples/mongodbatlas_advanced_cluster/multi-cloud/versions.tf index a8f13589d1..9b4be6c14c 100644 --- a/examples/mongodbatlas_advanced_cluster/multi-cloud/versions.tf +++ b/examples/mongodbatlas_advanced_cluster/multi-cloud/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { mongodbatlas = { source = "mongodb/mongodbatlas" - version = "~> 1.10.0" + version = "~> 1.18" } } required_version = ">= 1.0" diff --git a/examples/mongodbatlas_advanced_cluster/tenant-upgrade/main.tf b/examples/mongodbatlas_advanced_cluster/tenant-upgrade/main.tf index 3dad9ff507..863eb1b240 100644 --- a/examples/mongodbatlas_advanced_cluster/tenant-upgrade/main.tf +++ b/examples/mongodbatlas_advanced_cluster/tenant-upgrade/main.tf @@ -9,8 +9,6 @@ resource "mongodbatlas_advanced_cluster" "cluster" { cluster_type = "REPLICASET" replication_specs { - num_shards = 1 - region_configs { electable_specs { instance_size = var.provider_instance_size_name @@ -31,4 +29,4 @@ resource "mongodbatlas_advanced_cluster" "cluster" { resource "mongodbatlas_project" "project" { name = "TenantUpgradeTest" org_id = var.atlas_org_id -} \ No newline at end 
of file +} diff --git a/examples/mongodbatlas_cloud_backup_schedule/README.md b/examples/mongodbatlas_cloud_backup_schedule/README.md index 032a79c21e..97f6f949f3 100644 --- a/examples/mongodbatlas_cloud_backup_schedule/README.md +++ b/examples/mongodbatlas_cloud_backup_schedule/README.md @@ -42,7 +42,7 @@ $ terraform plan This project currently supports the below deployments: - MongoDB Atlas Project -- MongoDB Atlas Clusters (3 AWS M10 clusters in various regions) +- MongoDB Atlas Clusters (2 AWS M10 clusters in different regions) - MongoDB Cloud Backup Schedule(s) with various policies which is set up for each created cluster. **5\. Execute the Terraform apply.** diff --git a/examples/mongodbatlas_cloud_backup_schedule/main.tf b/examples/mongodbatlas_cloud_backup_schedule/main.tf index 7d81889e0d..4842cbfce3 100644 --- a/examples/mongodbatlas_cloud_backup_schedule/main.tf +++ b/examples/mongodbatlas_cloud_backup_schedule/main.tf @@ -2,7 +2,6 @@ locals { atlas_clusters = { "cluster_1" = { name = "m10-aws-1e", region = "US_EAST_1" }, "cluster_2" = { name = "m10-aws-2e", region = "US_EAST_2" }, - "cluster_3" = { name = "m10-aws-3w", region = "US_WEST_1" } } } @@ -18,8 +17,6 @@ resource "mongodbatlas_advanced_cluster" "automated_backup_test_cluster" { cluster_type = "REPLICASET" replication_specs { - num_shards = 1 - region_configs { electable_specs { instance_size = "M10" @@ -37,31 +34,45 @@ resource "mongodbatlas_advanced_cluster" "automated_backup_test_cluster" { } backup_enabled = true # enable cloud backup snapshots + pit_enabled = true } -resource "mongodbatlas_cloud_backup_schedule" "test" { - for_each = local.atlas_clusters - project_id = mongodbatlas_project.atlas-project.id - cluster_name = mongodbatlas_advanced_cluster.automated_backup_test_cluster[each.key].name +resource "mongodbatlas_cloud_backup_schedule" "test" { + for_each = local.atlas_clusters + project_id = mongodbatlas_project.atlas-project.id + cluster_name = mongodbatlas_advanced_cluster.automated_backup_test_cluster[each.key].name reference_hour_of_day = 3 reference_minute_of_hour = 45 restore_window_days = 4 + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY", + "YEARLY", + "ON_DEMAND"] + region_name = "US_WEST_1" + zone_id = mongodbatlas_advanced_cluster.automated_backup_test_cluster[each.key].replication_specs[0].zone_id[0] + should_copy_oplogs = true + } + policy_item_hourly { frequency_interval = 1 #accepted values = 1, 2, 4, 6, 8, 12 -> every n hours retention_unit = "days" - retention_value = 1 + retention_value = 4 } policy_item_daily { frequency_interval = 1 #accepted values = 1 -> every 1 day retention_unit = "days" - retention_value = 2 + retention_value = 4 } policy_item_weekly { frequency_interval = 4 # accepted values = 1 to 7 -> every 1=Monday,2=Tuesday,3=Wednesday,4=Thursday,5=Friday,6=Saturday,7=Sunday day of the week retention_unit = "weeks" - retention_value = 3 + retention_value = 4 } policy_item_monthly { frequency_interval = 5 # accepted values = 1 to 28 -> 1 to 28 every nth day of the month @@ -72,7 +83,7 @@ resource "mongodbatlas_cloud_backup_schedule" "test" { policy_item_yearly { frequency_interval = 1 # accepted values = 1 to 12 -> 1st day of nth month retention_unit = "years" - retention_value = 1 + retention_value = 4 } depends_on = [ diff --git a/examples/mongodbatlas_cluster/nvme-upgrade/README.md b/examples/mongodbatlas_cluster/nvme-upgrade/README.md index 6c6ab3878e..2c1b5c8e87 100644 --- 
a/examples/mongodbatlas_cluster/nvme-upgrade/README.md +++ b/examples/mongodbatlas_cluster/nvme-upgrade/README.md @@ -1,4 +1,5 @@ # MongoDB Atlas Provider -- Cluster NVME (Non-Volatile Memory Express) Upgrade + This example creates a project and cluster. It is intended to show how to upgrade from Standard, to PROVISIONED storage tier. Variables Required: @@ -37,4 +38,4 @@ provider_name = "AWS" provider_instance_size_name = "M40_NVME" provider_volume_type = "PROVISIONED" provider_disk_iops = 135125 -``` \ No newline at end of file +``` diff --git a/examples/mongodbatlas_cluster/tenant-upgrade/README.md b/examples/mongodbatlas_cluster/tenant-upgrade/README.md index 34c91e11a5..77a7879ff5 100644 --- a/examples/mongodbatlas_cluster/tenant-upgrade/README.md +++ b/examples/mongodbatlas_cluster/tenant-upgrade/README.md @@ -1,4 +1,5 @@ # MongoDB Atlas Provider -- Cluster Tenant Upgrade + This example creates a project and cluster. It is intended to show how to upgrade from shared, aka tenant, to dedicated tier. Variables Required: @@ -30,4 +31,4 @@ atlas_org_id = "627a9687f7f7f7f774de306f14" public_key = private_key = provider_name = "GCP" -provider_instance_size_name = "M10" \ No newline at end of file +provider_instance_size_name = "M10" diff --git a/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/README.md b/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/README.md index 4b98ed2449..60b16d1e2b 100644 --- a/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/README.md +++ b/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/README.md @@ -4,8 +4,8 @@ Setup [regionalized private endpoints](https://www.mongodb.com/docs/atlas/securi ## Gotchas -- Ensure `mongodbatlas_cluster` depends_on `mongodbatlas_private_endpoint_regional_mode` -- Despite being properly output, connection strings _may not be applied_ to `mongodbatlas_cluster` resource when changing regional mode enabled. This means the connection_strings may not exist in terraform state until the next `terraform apply`. +- Ensure `mongodbatlas_advanced_cluster` depends_on `mongodbatlas_private_endpoint_regional_mode` +- Despite being properly output, connection strings _may not be applied_ to `mongodbatlas_advanced_cluster` resource when changing regional mode enabled. This means the `connection_strings` may not exist in the Terraform state until the next `terraform apply`. ## Dependencies @@ -83,10 +83,10 @@ $ terraform destroy **What's the resource dependency chain?** 1. `mongodbatlas_project` must exist for any of the following -2. `mongodbatlas_privatelink_endpoint` is dependent on the `mongodbatlas_project` -3. `aws_vpc_endpoint` is dependent on the `mongodbatlas_privatelink_endpoint`, and its dependencies. -4. `mongodbatlas_privatelink_endpoint_service` is dependent on `aws_vpc_endpoint` and its dependencies. -5. `mongodbatlas_cluster` is dependent only on the `mongodbatlas_project`, howerver; its `connection_strings` are sourced from the `mongodbatlas_privatelink_endpoint_service`. `mongodbatlas_privatelink_endpoint_service` has explicitly been added to the `mongodbatlas_cluster` `depends_on` to ensure the private connection strings are correct following `terraform apply`. +2. `mongodbatlas_privatelink_endpoint` depends on `mongodbatlas_project` +3. `aws_vpc_endpoint` depends on `mongodbatlas_privatelink_endpoint`. +4. `mongodbatlas_privatelink_endpoint_service` depends on `aws_vpc_endpoint`. +5. `mongodbatlas_advanced_cluster` depends only on `mongodbatlas_project`. 
However, its `connection_strings` are sourced from `mongodbatlas_privatelink_endpoint_service`. Add `mongodbatlas_privatelink_endpoint_service` explicitly to `mongodbatlas_advanced_cluster.depends_on` to ensure that the private connection strings are correct when running `terraform apply`. **Important Point** @@ -139,7 +139,7 @@ If you've properly enabled regionalized private endpoints, `connection_strings` To output the `srv_connection_string`s, follow the [example output.tf](output.tf): ``` locals { - private_endpoints = flatten([for cs in mongodbatlas_cluster.geosharded.connection_strings : cs.private_endpoint]) + private_endpoints = flatten([for cs in mongodbatlas_advanced_cluster.geosharded.connection_strings : cs.private_endpoint]) connection_strings_east = [ for pe in local.private_endpoints : pe.srv_connection_string diff --git a/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/atlas-cluster.tf b/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/atlas-cluster.tf index fe648663b9..f5c12d9d99 100644 --- a/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/atlas-cluster.tf +++ b/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/atlas-cluster.tf @@ -1,31 +1,56 @@ -resource "mongodbatlas_cluster" "geosharded" { - project_id = var.project_id - name = var.cluster_name - cloud_backup = true - auto_scaling_disk_gb_enabled = true - mongo_db_major_version = "7.0" - cluster_type = "GEOSHARDED" - replication_specs { - zone_name = "Zone 1" - num_shards = 2 - regions_config { - region_name = var.atlas_region_east - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 +resource "mongodbatlas_advanced_cluster" "geosharded" { + project_id = var.project_id + name = var.cluster_name + cluster_type = "GEOSHARDED" + backup_enabled = true + + replication_specs { # Shard 1 + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = var.atlas_region_east } - regions_config { - region_name = var.atlas_region_west - electable_nodes = 2 - priority = 6 - read_only_nodes = 0 + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 2 + } + provider_name = "AWS" + priority = 6 + region_name = var.atlas_region_west } } - # Provider settings - provider_name = "AWS" - disk_size_gb = 80 - provider_instance_size_name = "M30" + replication_specs { # Shard 2 + zone_name = "Zone 1" + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = var.atlas_region_east + } + + region_configs { + electable_specs { + instance_size = "M30" + node_count = 2 + } + provider_name = "AWS" + priority = 6 + region_name = var.atlas_region_west + } + } depends_on = [ mongodbatlas_privatelink_endpoint_service.pe_east_service, diff --git a/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/output.tf b/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/output.tf index 6fe00dd590..3de3074a55 100644 --- a/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/output.tf +++ b/examples/mongodbatlas_privatelink_endpoint/aws/cluster-geosharded/output.tf @@ -1,5 +1,5 @@ locals { - private_endpoints = flatten([for cs in mongodbatlas_cluster.geosharded.connection_strings : cs.private_endpoint]) + private_endpoints = flatten([for cs in mongodbatlas_advanced_cluster.geosharded.connection_strings : cs.private_endpoint]) connection_strings_east 
= [ for pe in local.private_endpoints : pe.srv_connection_string diff --git a/examples/mongodbatlas_privatelink_endpoint_service_serverless/aws/atlas-cluster.tf b/examples/mongodbatlas_privatelink_endpoint_service_serverless/aws/atlas-cluster.tf index 8310c53001..dc4ba8a1b6 100644 --- a/examples/mongodbatlas_privatelink_endpoint_service_serverless/aws/atlas-cluster.tf +++ b/examples/mongodbatlas_privatelink_endpoint_service_serverless/aws/atlas-cluster.tf @@ -1,6 +1,6 @@ resource "mongodbatlas_serverless_instance" "cluster_atlas" { project_id = var.atlasprojectid - name = "cluster-atlas" + name = "ClusterAtlas" provider_settings_backing_provider_name = "AWS" provider_settings_provider_name = "SERVERLESS" provider_settings_region_name = "US_EAST_1" diff --git a/go.mod b/go.mod index 291b2d375b..65e23141f6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/mongodb/terraform-provider-mongodbatlas -go 1.22 +go 1.23 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 @@ -8,7 +8,7 @@ require ( github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-version v1.7.0 - github.com/hashicorp/hcl/v2 v2.21.0 + github.com/hashicorp/hcl/v2 v2.22.0 github.com/hashicorp/terraform-plugin-framework v1.10.0 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 @@ -22,9 +22,9 @@ require ( github.com/spf13/cast v1.6.0 github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.15.0 - go.mongodb.org/atlas v0.36.0 + go.mongodb.org/atlas v0.37.0 go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 - go.mongodb.org/atlas-sdk/v20240805001 v20240805001.0.0 + go.mongodb.org/atlas-sdk/v20240805003 v20240805003.0.0 go.mongodb.org/realm v0.1.0 ) diff --git a/go.sum b/go.sum index 2910145caf..8c698c5820 100644 --- a/go.sum +++ b/go.sum @@ -506,8 +506,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= -github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hashicorp/hcl/v2 v2.22.0 h1:hkZ3nCtqeJsDhPRFz5EA9iwcG1hNWGePOTw6oyul12M= +github.com/hashicorp/hcl/v2 v2.22.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= @@ -778,12 +778,12 @@ github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmB github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.mongodb.org/atlas v0.12.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M= -go.mongodb.org/atlas v0.36.0 h1:m05S3AO7zkl+bcG1qaNsEKBnAqnKx2FDwLooHpIG3j4= -go.mongodb.org/atlas v0.36.0/go.mod h1:nfPldE9dSama6G2IbIzmEza02Ly7yFZjMMVscaM0uEc= +go.mongodb.org/atlas v0.37.0 
h1:zQnO1o5+bVP9IotpAYpres4UjMD2F4nwNEFTZhNL4ck= +go.mongodb.org/atlas v0.37.0/go.mod h1:DJYtM+vsEpPEMSkQzJnFHrT0sP7ev6cseZc/GGjJYG8= go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 h1:d/gbYJ+obR0EM/3DZf7+ZMi2QWISegm3mid7Or708cc= go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0/go.mod h1:O47ZrMMfcWb31wznNIq2PQkkdoFoK0ea2GlmRqGJC2s= -go.mongodb.org/atlas-sdk/v20240805001 v20240805001.0.0 h1:EwA2g7i4JYc0b/oE7zvvOH+POYVrHrWR7BONex3MFTA= -go.mongodb.org/atlas-sdk/v20240805001 v20240805001.0.0/go.mod h1:0aHEphVfsYbpg3CiEUcXeAU7OVoOFig1tltXdLjYiSQ= +go.mongodb.org/atlas-sdk/v20240805003 v20240805003.0.0 h1:f2PRtW3r9873dGApUXwf/njVT2uWpXtGw9Pg9czMX5I= +go.mongodb.org/atlas-sdk/v20240805003 v20240805003.0.0/go.mod h1:CVDolHhHTrXPPqig+7KKTPu54tIVqsrtmQm4LssNcZ0= go.mongodb.org/realm v0.1.0 h1:zJiXyLaZrznQ+Pz947ziSrDKUep39DO4SfA0Fzx8M4M= go.mongodb.org/realm v0.1.0/go.mod h1:4Vj6iy+Puo1TDERcoh4XZ+pjtwbOzPpzqy3Cwe8ZmDM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= diff --git a/internal/common/constant/deprecation.go b/internal/common/constant/deprecation.go index 11c4a0650f..72211322d2 100644 --- a/internal/common/constant/deprecation.go +++ b/internal/common/constant/deprecation.go @@ -1,8 +1,11 @@ package constant const ( + DeprecationParam = "This parameter is deprecated." + DeprecationParamWithReplacement = "This parameter is deprecated. Please transition to %s." DeprecationParamByDate = "This parameter is deprecated and will be removed by %s." DeprecationParamByDateWithReplacement = "This parameter is deprecated and will be removed by %s. Please transition to %s." + DeprecationParamFutureWithReplacement = "This parameter is deprecated and will be removed in the future. Please transition to %s" DeprecationParamByVersion = "This parameter is deprecated and will be removed in version %s." DeprecationResourceByDateWithReplacement = "This resource is deprecated and will be removed in %s. Please transition to %s." DeprecationDataSourceByDateWithReplacement = "This data source is deprecated and will be removed in %s. Please transition to %s." 
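These new deprecation messages back the schema transition exercised in the example updates earlier in this changeset: a single `replication_specs` block fanned out by `num_shards`, together with the root-level `disk_size_gb`, gives way to one `replication_specs` block per shard with disk size set on the hardware specs. A condensed sketch of the new shape follows; instance sizes, disk size, and region are illustrative, and disk size is kept identical across shards because independent storage scaling is not supported.

```hcl
# New sharding schema sketch: each replication_specs block is one shard, so
# shards can use different instance sizes, and disk_size_gb moves into the
# electable_specs hardware specification.
resource "mongodbatlas_advanced_cluster" "sharded" {
  project_id   = var.project_id
  name         = "new-sharding-schema"
  cluster_type = "SHARDED"

  replication_specs { # shard 1
    region_configs {
      electable_specs {
        instance_size = "M30"
        node_count    = 3
        disk_size_gb  = 80
      }
      provider_name = "AWS"
      priority      = 7
      region_name   = "EU_WEST_1"
    }
  }

  replication_specs { # shard 2, scaled independently
    region_configs {
      electable_specs {
        instance_size = "M10"
        node_count    = 3
        disk_size_gb  = 80
      }
      provider_name = "AWS"
      priority      = 7
      region_name   = "EU_WEST_1"
    }
  }
}
```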
diff --git a/internal/common/conversion/flatten_expand.go b/internal/common/conversion/flatten_expand.go index 14148596e3..4a36480cda 100644 --- a/internal/common/conversion/flatten_expand.go +++ b/internal/common/conversion/flatten_expand.go @@ -3,8 +3,7 @@ package conversion import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func FlattenLinks(links []admin.Link) []map[string]string { @@ -29,17 +28,6 @@ func FlattenTags(tags []admin.ResourceTag) []map[string]string { return ret } -func FlattenTagsOldSDK(tags []admin20240530.ResourceTag) []map[string]string { - ret := make([]map[string]string, len(tags)) - for i, tag := range tags { - ret[i] = map[string]string{ - "key": tag.GetKey(), - "value": tag.GetValue(), - } - } - return ret -} - func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { list := d.Get("tags").(*schema.Set) ret := make([]admin.ResourceTag, list.Len()) @@ -53,20 +41,6 @@ func ExpandTagsFromSetSchema(d *schema.ResourceData) *[]admin.ResourceTag { return &ret } -// this will be removed once ISS dev branch is merged -func ExpandTagsFromSetSchemaOldSDK(d *schema.ResourceData) *[]admin20240530.ResourceTag { - list := d.Get("tags").(*schema.Set) - ret := make([]admin20240530.ResourceTag, list.Len()) - for i, item := range list.List() { - tag := item.(map[string]any) - ret[i] = admin20240530.ResourceTag{ - Key: tag["key"].(string), - Value: tag["value"].(string), - } - } - return &ret -} - func ExpandStringList(list []any) (res []string) { for _, v := range list { res = append(res, v.(string)) diff --git a/internal/config/client.go b/internal/config/client.go index 8b31a10ccd..dfcec11ba2 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -10,7 +10,7 @@ import ( "time" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" matlasClient "go.mongodb.org/atlas/mongodbatlas" realmAuth "go.mongodb.org/realm/auth" "go.mongodb.org/realm/realm" diff --git a/internal/service/accesslistapikey/data_source_accesslist_api_keys.go b/internal/service/accesslistapikey/data_source_accesslist_api_keys.go index 0ce79a22d4..62effe0dc0 100644 --- a/internal/service/accesslistapikey/data_source_accesslist_api_keys.go +++ b/internal/service/accesslistapikey/data_source_accesslist_api_keys.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/accesslistapikey/resource_access_list_api_key.go b/internal/service/accesslistapikey/resource_access_list_api_key.go index f099ec0e14..a5c4bdc2a9 100644 --- a/internal/service/accesslistapikey/resource_access_list_api_key.go +++ b/internal/service/accesslistapikey/resource_access_list_api_key.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() 
*schema.Resource { diff --git a/internal/service/advancedcluster/data_source_advanced_cluster.go b/internal/service/advancedcluster/data_source_advanced_cluster.go index 32b95da947..c81b5e3ceb 100644 --- a/internal/service/advancedcluster/data_source_advanced_cluster.go +++ b/internal/service/advancedcluster/data_source_advanced_cluster.go @@ -5,10 +5,12 @@ import ( "fmt" "net/http" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -20,6 +22,10 @@ func DataSource() *schema.Resource { Type: schema.TypeString, Required: true, }, + "use_replication_spec_per_shard": { + Type: schema.TypeBool, + Optional: true, + }, "advanced_configuration": SchemaAdvancedConfigDS(), "backup_enabled": { Type: schema.TypeBool, @@ -53,8 +59,9 @@ func DataSource() *schema.Resource { Computed: true, }, "disk_size_gb": { - Type: schema.TypeFloat, - Computed: true, + Type: schema.TypeFloat, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "encryption_at_rest_provider": { Type: schema.TypeString, @@ -63,7 +70,7 @@ func DataSource() *schema.Resource { "labels": { Type: schema.TypeSet, Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByDateWithReplacement, "September 2024", "tags"), + Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -105,13 +112,23 @@ func DataSource() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { + Type: schema.TypeString, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, + "zone_id": { Type: schema.TypeString, Computed: true, }, - "num_shards": { - Type: schema.TypeInt, + "external_id": { + Type: schema.TypeString, Computed: true, }, + "num_shards": { + Type: schema.TypeInt, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, "region_configs": { Type: schema.TypeList, Computed: true, @@ -235,99 +252,88 @@ func DataSource() *schema.Resource { func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 + projectID := d.Get("project_id").(string) clusterName := d.Get("name").(string) + useReplicationSpecPerShard := false + var replicationSpecs []map[string]any + var clusterID string - cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - return nil - } - return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) - } - - if err := d.Set("backup_enabled", cluster.GetBackupEnabled()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "backup_enabled", clusterName, err)) + if v, ok := d.GetOk("use_replication_spec_per_shard"); ok { + useReplicationSpecPerShard = v.(bool) } - if err := d.Set("bi_connector_config", flattenBiConnectorConfig(cluster.GetBiConnector())); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "bi_connector_config", clusterName, err)) - } - - if err := d.Set("cluster_type", cluster.GetClusterType()); err != nil { - return 
diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "cluster_type", clusterName, err)) - } - - if err := d.Set("connection_strings", flattenConnectionStrings(cluster.GetConnectionStrings())); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "connection_strings", clusterName, err)) - } - - if err := d.Set("create_date", conversion.TimePtrToStringPtr(cluster.CreateDate)); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "create_date", clusterName, err)) - } + if !useReplicationSpecPerShard { + clusterDescOld, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + if resp != nil { + if resp.StatusCode == http.StatusNotFound { + return nil + } + if admin20240530.IsErrorCode(err, "ASYMMETRIC_SHARD_UNSUPPORTED") { + return diag.FromErr(fmt.Errorf("please add `use_replication_spec_per_shard = true` to your data source configuration to enable asymmetric shard support. Refer to documentation for more details. %s", err)) + } + } + return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + } - if err := d.Set("disk_size_gb", cluster.GetDiskSizeGB()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) - } + clusterID = clusterDescOld.GetId() - if err := d.Set("encryption_at_rest_provider", cluster.GetEncryptionAtRestProvider()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "encryption_at_rest_provider", clusterName, err)) - } + if err := d.Set("disk_size_gb", clusterDescOld.GetDiskSizeGB()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) + } - if err := d.Set("labels", flattenLabels(cluster.GetLabels())); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "labels", clusterName, err)) - } + zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(ctx, projectID, clusterName, connV2) + if err != nil { + return diag.FromErr(err) + } - if err := d.Set("tags", conversion.FlattenTagsOldSDK(cluster.GetTags())); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "tags", clusterName, err)) - } + replicationSpecs, err = FlattenAdvancedReplicationSpecsOldSDK(ctx, clusterDescOld.GetReplicationSpecs(), zoneNameToZoneIDs, clusterDescOld.GetDiskSizeGB(), d.Get("replication_specs").([]any), d, connV2) + if err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) + } - if err := d.Set("mongo_db_major_version", cluster.GetMongoDBMajorVersion()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "mongo_db_major_version", clusterName, err)) - } + diags := setRootFields(d, convertClusterDescToLatestExcludeRepSpecs(clusterDescOld), false) + if diags.HasError() { + return diags + } + } else { + clusterDescLatest, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil + } + return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + } - if err := d.Set("mongo_db_version", cluster.GetMongoDBVersion()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "mongo_db_version", clusterName, err)) - } + clusterID = clusterDescLatest.GetId() - if err := d.Set("name", cluster.GetName()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "name", clusterName, err)) - } + // root disk_size_gb 
defined for backwards compatibility avoiding breaking changes + if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(clusterDescLatest)); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) + } - if err := d.Set("paused", cluster.GetPaused()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "paused", clusterName, err)) - } + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) + if err != nil { + return diag.FromErr(err) + } - if err := d.Set("pit_enabled", cluster.GetPitEnabled()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "pit_enabled", clusterName, err)) - } + replicationSpecs, err = flattenAdvancedReplicationSpecsDS(ctx, clusterDescLatest.GetReplicationSpecs(), zoneNameToOldReplicationSpecIDs, d, connV2) + if err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) + } - replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV220240530) - if err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) + diags := setRootFields(d, clusterDescLatest, false) + if diags.HasError() { + return diags + } } if err := d.Set("replication_specs", replicationSpecs); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } - if err := d.Set("root_cert_type", cluster.GetRootCertType()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "state_name", clusterName, err)) - } - - if err := d.Set("state_name", cluster.GetStateName()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "state_name", clusterName, err)) - } - if err := d.Set("termination_protection_enabled", cluster.GetTerminationProtectionEnabled()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "termination_protection_enabled", clusterName, err)) - } - if err := d.Set("version_release_system", cluster.GetVersionReleaseSystem()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "version_release_system", clusterName, err)) - } - if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) - } - processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { return diag.FromErr(fmt.Errorf(ErrorAdvancedConfRead, clusterName, err)) @@ -337,6 +343,6 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. 
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "advanced_configuration", clusterName, err)) } - d.SetId(cluster.GetId()) + d.SetId(clusterID) return nil } diff --git a/internal/service/advancedcluster/data_source_advanced_clusters.go b/internal/service/advancedcluster/data_source_advanced_clusters.go index e358dd9b5e..b9e7b1f877 100644 --- a/internal/service/advancedcluster/data_source_advanced_clusters.go +++ b/internal/service/advancedcluster/data_source_advanced_clusters.go @@ -6,13 +6,20 @@ import ( "log" "net/http" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" +) + +const ( + errorListRead = "error reading advanced cluster list for project(%s): %s" ) func PluralDataSource() *schema.Resource { @@ -23,6 +30,10 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeString, Required: true, }, + "use_replication_spec_per_shard": { + Type: schema.TypeBool, + Optional: true, + }, "results": { Type: schema.TypeList, Computed: true, @@ -61,8 +72,9 @@ func PluralDataSource() *schema.Resource { Computed: true, }, "disk_size_gb": { - Type: schema.TypeFloat, - Computed: true, + Type: schema.TypeFloat, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "encryption_at_rest_provider": { Type: schema.TypeString, @@ -71,7 +83,7 @@ func PluralDataSource() *schema.Resource { "labels": { Type: schema.TypeSet, Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByDateWithReplacement, "September 2024", "tags"), + Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -113,13 +125,23 @@ func PluralDataSource() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { + Type: schema.TypeString, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, + "zone_id": { Type: schema.TypeString, Computed: true, }, - "num_shards": { - Type: schema.TypeInt, + "external_id": { + Type: schema.TypeString, Computed: true, }, + "num_shards": { + Type: schema.TypeInt, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, "region_configs": { Type: schema.TypeList, Computed: true, @@ -246,24 +268,51 @@ func PluralDataSource() *schema.Resource { func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) + useReplicationSpecPerShard := false + d.SetId(id.UniqueId()) - list, resp, err := connV220240530.ClustersApi.ListClusters(ctx, projectID).Execute() - if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - return nil - } - return diag.FromErr(fmt.Errorf("error reading advanced cluster list for project(%s): %s", projectID, err)) - } - if err := d.Set("results", flattenAdvancedClusters(ctx, connV220240530, list.GetResults(), d)); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "results", 
d.Id(), err)) + if v, ok := d.GetOk("use_replication_spec_per_shard"); ok { + useReplicationSpecPerShard = v.(bool) } + if !useReplicationSpecPerShard { + list, resp, err := connV220240530.ClustersApi.ListClusters(ctx, projectID).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil + } + return diag.FromErr(fmt.Errorf(errorListRead, projectID, err)) + } + results, diags := flattenAdvancedClustersOldSDK(ctx, connV220240530, connV2, list.GetResults(), d) + if len(diags) > 0 { + return diags + } + if err := d.Set("results", results); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "results", d.Id(), err)) + } + } else { + list, resp, err := connV2.ClustersApi.ListClusters(ctx, projectID).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil + } + return diag.FromErr(fmt.Errorf(errorListRead, projectID, err)) + } + results, diags := flattenAdvancedClusters(ctx, connV220240530, connV2, list.GetResults(), d) + if len(diags) > 0 { + return diags + } + if err := d.Set("results", results); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "results", d.Id(), err)) + } + } return nil } -func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530.APIClient, clusters []admin20240530.AdvancedClusterDescription, d *schema.ResourceData) []map[string]any { +func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530.APIClient, connV2 *admin.APIClient, clusters []admin.ClusterDescription20240805, d *schema.ResourceData) ([]map[string]any, diag.Diagnostics) { results := make([]map[string]any, 0, len(clusters)) for i := range clusters { cluster := &clusters[i] @@ -271,7 +320,13 @@ func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530. if err != nil { log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", cluster.GetId(), err) } - replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), nil, d, connV220240530) + + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, cluster.GetGroupId(), cluster.GetName(), connV220240530) + if err != nil { + return nil, diag.FromErr(err) + } + + replicationSpecs, err := flattenAdvancedReplicationSpecsDS(ctx, cluster.GetReplicationSpecs(), zoneNameToOldReplicationSpecIDs, d, connV2) if err != nil { log.Printf("[WARN] Error setting `replication_specs` for the cluster(%s): %s", cluster.GetId(), err) } @@ -279,14 +334,61 @@ func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530. 
result := map[string]any{ "advanced_configuration": flattenProcessArgs(processArgs), "backup_enabled": cluster.GetBackupEnabled(), - "bi_connector_config": flattenBiConnectorConfig(cluster.GetBiConnector()), + "bi_connector_config": flattenBiConnectorConfig(cluster.BiConnector), "cluster_type": cluster.GetClusterType(), "create_date": conversion.TimePtrToStringPtr(cluster.CreateDate), "connection_strings": flattenConnectionStrings(cluster.GetConnectionStrings()), - "disk_size_gb": cluster.GetDiskSizeGB(), + "disk_size_gb": GetDiskSizeGBFromReplicationSpec(cluster), "encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(), "labels": flattenLabels(cluster.GetLabels()), - "tags": conversion.FlattenTagsOldSDK(cluster.GetTags()), + "tags": conversion.FlattenTags(cluster.GetTags()), + "mongo_db_major_version": cluster.GetMongoDBMajorVersion(), + "mongo_db_version": cluster.GetMongoDBVersion(), + "name": cluster.GetName(), + "paused": cluster.GetPaused(), + "pit_enabled": cluster.GetPitEnabled(), + "replication_specs": replicationSpecs, + "root_cert_type": cluster.GetRootCertType(), + "state_name": cluster.GetStateName(), + "termination_protection_enabled": cluster.GetTerminationProtectionEnabled(), + "version_release_system": cluster.GetVersionReleaseSystem(), + "global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(), + } + results = append(results, result) + } + return results, nil +} + +func flattenAdvancedClustersOldSDK(ctx context.Context, connV20240530 *admin20240530.APIClient, connV2 *admin.APIClient, clusters []admin20240530.AdvancedClusterDescription, d *schema.ResourceData) ([]map[string]any, diag.Diagnostics) { + results := make([]map[string]any, 0, len(clusters)) + for i := range clusters { + cluster := &clusters[i] + processArgs, _, err := connV20240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, cluster.GetGroupId(), cluster.GetName()).Execute() + if err != nil { + log.Printf("[WARN] Error setting `advanced_configuration` for the cluster(%s): %s", cluster.GetId(), err) + } + + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, cluster.GetGroupId(), cluster.GetName(), connV20240530) + if err != nil { + return nil, diag.FromErr(err) + } + + replicationSpecs, err := FlattenAdvancedReplicationSpecsOldSDK(ctx, cluster.GetReplicationSpecs(), zoneNameToOldReplicationSpecIDs, cluster.GetDiskSizeGB(), nil, d, connV2) + if err != nil { + log.Printf("[WARN] Error setting `replication_specs` for the cluster(%s): %s", cluster.GetId(), err) + } + + result := map[string]any{ + "advanced_configuration": flattenProcessArgs(processArgs), + "backup_enabled": cluster.GetBackupEnabled(), + "bi_connector_config": flattenBiConnectorConfig(convertBiConnectToLatest(cluster.BiConnector)), + "cluster_type": cluster.GetClusterType(), + "create_date": conversion.TimePtrToStringPtr(cluster.CreateDate), + "connection_strings": flattenConnectionStrings(*convertConnectionStringToLatest(cluster.ConnectionStrings)), + "disk_size_gb": cluster.GetDiskSizeGB(), + "encryption_at_rest_provider": cluster.GetEncryptionAtRestProvider(), + "labels": flattenLabels(*convertLabelsToLatest(cluster.Labels)), + "tags": conversion.FlattenTags(convertTagsToLatest(cluster.GetTags())), "mongo_db_major_version": cluster.GetMongoDBMajorVersion(), "mongo_db_version": cluster.GetMongoDBVersion(), "name": cluster.GetName(), @@ -301,5 +403,5 @@ func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530. 
} results = append(results, result) } - return results + return results, nil } diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 0d5feb4d84..04bd6f0ec3 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ b/internal/service/advancedcluster/model_advanced_cluster.go @@ -9,13 +9,16 @@ import ( "slices" "strings" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spf13/cast" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "github.com/spf13/cast" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) var ( @@ -60,16 +63,18 @@ func SchemaAdvancedConfigDS() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default_read_concern": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "default_write_concern": { Type: schema.TypeString, Computed: true, }, "fail_index_key_too_long": { - Type: schema.TypeBool, - Computed: true, + Type: schema.TypeBool, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "javascript_enabled": { Type: schema.TypeBool, @@ -188,9 +193,10 @@ func SchemaAdvancedConfig() *schema.Schema { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default_read_concern": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "default_write_concern": { Type: schema.TypeString, @@ -198,9 +204,10 @@ func SchemaAdvancedConfig() *schema.Schema { Computed: true, }, "fail_index_key_too_long": { - Type: schema.TypeBool, - Optional: true, - Computed: true, + Type: schema.TypeBool, + Optional: true, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "javascript_enabled": { Type: schema.TypeBool, @@ -275,7 +282,21 @@ func IsSharedTier(instanceSize string) bool { return instanceSize == "M0" || instanceSize == "M2" || instanceSize == "M5" } -func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin20240530.ClustersApi) retry.StateRefreshFunc { +// GetDiskSizeGBFromReplicationSpec obtains the diskSizeGB value by looking into the electable spec of the first replication spec. +// Independent storage size scaling is not supported (CLOUDP-201331), meaning all electable/analytics/readOnly configs in all replication specs are the same. 
+func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20240805) float64 { + specs := cluster.GetReplicationSpecs() + if len(specs) < 1 { + return 0 + } + configs := specs[0].GetRegionConfigs() + if len(configs) < 1 { + return 0 + } + return configs[0].ElectableSpecs.GetDiskSizeGB() +} + +func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin.ClustersApi) retry.StateRefreshFunc { return func() (any, string, error) { cluster, resp, err := client.GetCluster(ctx, projectID, name).Execute() @@ -300,7 +321,7 @@ func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admi } } -func ResourceClusterListAdvancedRefreshFunc(ctx context.Context, projectID string, clustersAPI admin20240530.ClustersApi) retry.StateRefreshFunc { +func ResourceClusterListAdvancedRefreshFunc(ctx context.Context, projectID string, clustersAPI admin.ClustersApi) retry.StateRefreshFunc { return func() (any, string, error) { clusters, resp, err := clustersAPI.ListClusters(ctx, projectID).Execute() @@ -339,7 +360,7 @@ func FormatMongoDBMajorVersion(val any) string { return fmt.Sprintf("%.1f", cast.ToFloat32(val)) } -func flattenLabels(l []admin20240530.ComponentLabel) []map[string]string { +func flattenLabels(l []admin.ComponentLabel) []map[string]string { labels := make([]map[string]string, 0, len(l)) for _, item := range l { if item.GetKey() == ignoreLabel { @@ -353,7 +374,19 @@ func flattenLabels(l []admin20240530.ComponentLabel) []map[string]string { return labels } -func flattenConnectionStrings(str admin20240530.ClusterConnectionStrings) []map[string]any { +func flattenTags(tags *[]admin.ResourceTag) []map[string]string { + tagSlice := *tags + ret := make([]map[string]string, len(tagSlice)) + for i, tag := range tagSlice { + ret[i] = map[string]string{ + "key": tag.GetKey(), + "value": tag.GetValue(), + } + } + return ret +} + +func flattenConnectionStrings(str admin.ClusterConnectionStrings) []map[string]any { return []map[string]any{ { "standard": str.GetStandard(), @@ -365,7 +398,7 @@ func flattenConnectionStrings(str admin20240530.ClusterConnectionStrings) []map[ } } -func flattenPrivateEndpoint(privateEndpoints []admin20240530.ClusterDescriptionConnectionStringsPrivateEndpoint) []map[string]any { +func flattenPrivateEndpoint(privateEndpoints []admin.ClusterDescriptionConnectionStringsPrivateEndpoint) []map[string]any { endpoints := make([]map[string]any, 0, len(privateEndpoints)) for _, endpoint := range privateEndpoints { endpoints = append(endpoints, map[string]any{ @@ -379,7 +412,7 @@ func flattenPrivateEndpoint(privateEndpoints []admin20240530.ClusterDescriptionC return endpoints } -func flattenEndpoints(listEndpoints []admin20240530.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) []map[string]any { +func flattenEndpoints(listEndpoints []admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) []map[string]any { endpoints := make([]map[string]any, 0, len(listEndpoints)) for _, endpoint := range listEndpoints { endpoints = append(endpoints, map[string]any{ @@ -391,7 +424,7 @@ func flattenEndpoints(listEndpoints []admin20240530.ClusterDescriptionConnection return endpoints } -func flattenBiConnectorConfig(biConnector admin20240530.BiConnector) []map[string]any { +func flattenBiConnectorConfig(biConnector *admin.BiConnector) []map[string]any { return []map[string]any{ { "enabled": biConnector.GetEnabled(), @@ -400,11 +433,11 @@ func flattenBiConnectorConfig(biConnector admin20240530.BiConnector) []map[strin } } -func 
expandBiConnectorConfig(d *schema.ResourceData) *admin20240530.BiConnector { +func expandBiConnectorConfig(d *schema.ResourceData) *admin.BiConnector { if v, ok := d.GetOk("bi_connector_config"); ok { if biConn := v.([]any); len(biConn) > 0 { biConnMap := biConn[0].(map[string]any) - return &admin20240530.BiConnector{ + return &admin.BiConnector{ Enabled: conversion.Pointer(cast.ToBool(biConnMap["enabled"])), ReadPreference: conversion.StringPtr(cast.ToString(biConnMap["read_preference"])), } @@ -434,8 +467,35 @@ func flattenProcessArgs(p *admin20240530.ClusterDescriptionProcessArgs) []map[st } } -func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin20240530.ReplicationSpec, tfMapObjects []any, - d *schema.ResourceData, connV220240530 *admin20240530.APIClient) ([]map[string]any, error) { +func FlattenAdvancedReplicationSpecsOldSDK(ctx context.Context, apiObjects []admin20240530.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObjects []any, + d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { + // for flattening old model we need information of value defined at root disk_size_gb so we set the value in new location under hardware specs + replicationSpecFlattener := func(ctx context.Context, sdkModel *admin20240530.ReplicationSpec, tfModel map[string]any, resourceData *schema.ResourceData, client *admin.APIClient) (map[string]any, error) { + return flattenAdvancedReplicationSpecOldSDK(ctx, sdkModel, zoneNameToZoneIDs, rootDiskSizeGB, tfModel, resourceData, connV2) + } + return flattenAdvancedReplicationSpecsLogic[admin20240530.ReplicationSpec](ctx, apiObjects, tfMapObjects, d, + doesAdvancedReplicationSpecMatchAPIOldSDK, replicationSpecFlattener, connV2) +} + +func flattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObjects []any, + d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { + // for flattening new model we need information of replication spec ids associated to old API to avoid breaking changes for users referencing replication_specs.*.id + replicationSpecFlattener := func(ctx context.Context, sdkModel *admin.ReplicationSpec20240805, tfModel map[string]any, resourceData *schema.ResourceData, client *admin.APIClient) (map[string]any, error) { + return flattenAdvancedReplicationSpec(ctx, sdkModel, zoneNameToOldReplicationSpecIDs, tfModel, resourceData, connV2) + } + return flattenAdvancedReplicationSpecsLogic[admin.ReplicationSpec20240805](ctx, apiObjects, tfMapObjects, d, + doesAdvancedReplicationSpecMatchAPI, replicationSpecFlattener, connV2) +} + +type ReplicationSpecSDKModel interface { + admin20240530.ReplicationSpec | admin.ReplicationSpec20240805 +} + +func flattenAdvancedReplicationSpecsLogic[T ReplicationSpecSDKModel]( + ctx context.Context, apiObjects []T, tfMapObjects []any, d *schema.ResourceData, + tfModelWithSDKMatcher func(map[string]any, *T) bool, + flattenRepSpec func(context.Context, *T, map[string]any, *schema.ResourceData, *admin.APIClient) (map[string]any, error), + connV2 *admin.APIClient) ([]map[string]any, error) { if len(apiObjects) == 0 { return nil, nil } @@ -451,11 +511,11 @@ func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin2024 } for j := 0; j < len(apiObjects); j++ { - if wasAPIObjectUsed[j] || !doesAdvancedReplicationSpecMatchAPI(tfMapObject, &apiObjects[j]) { + if wasAPIObjectUsed[j] || !tfModelWithSDKMatcher(tfMapObject, 
&apiObjects[j]) { continue } - advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiObjects[j], tfMapObject, d, connV220240530) + advancedReplicationSpec, err := flattenRepSpec(ctx, &apiObjects[j], tfMapObject, d, connV2) if err != nil { return nil, err @@ -479,7 +539,7 @@ func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin2024 } j := slices.IndexFunc(wasAPIObjectUsed, func(isUsed bool) bool { return !isUsed }) - advancedReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiObjects[j], tfMapObject, d, connV220240530) + advancedReplicationSpec, err := flattenRepSpec(ctx, &apiObjects[j], tfMapObject, d, connV2) if err != nil { return nil, err @@ -492,41 +552,16 @@ func FlattenAdvancedReplicationSpecs(ctx context.Context, apiObjects []admin2024 return tfList, nil } -func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin20240530.ReplicationSpec) bool { +func doesAdvancedReplicationSpecMatchAPIOldSDK(tfObject map[string]any, apiObject *admin20240530.ReplicationSpec) bool { return tfObject["id"] == apiObject.GetId() || (tfObject["id"] == nil && tfObject["zone_name"] == apiObject.GetZoneName()) } -func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin20240530.ReplicationSpec, tfMapObject map[string]any, - d *schema.ResourceData, connV220240530 *admin20240530.APIClient) (map[string]any, error) { - if apiObject == nil { - return nil, nil - } - - tfMap := map[string]any{} - tfMap["num_shards"] = apiObject.GetNumShards() - tfMap["id"] = apiObject.GetId() - if tfMapObject != nil { - object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), tfMapObject["region_configs"].([]any), d, connV220240530) - if err != nil { - return nil, err - } - tfMap["region_configs"] = object - tfMap["container_id"] = containerIDs - } else { - object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), nil, d, connV220240530) - if err != nil { - return nil, err - } - tfMap["region_configs"] = object - tfMap["container_id"] = containerIDs - } - tfMap["zone_name"] = apiObject.GetZoneName() - - return tfMap, nil +func doesAdvancedReplicationSpecMatchAPI(tfObject map[string]any, apiObject *admin.ReplicationSpec20240805) bool { + return tfObject["external_id"] == apiObject.GetId() } -func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []admin20240530.CloudRegionConfig, tfMapObjects []any, - d *schema.ResourceData, connV220240530 *admin20240530.APIClient) (tfResult []map[string]any, containersIDs map[string]string, err error) { +func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects []admin.CloudRegionConfig20240805, tfMapObjects []any, + d *schema.ResourceData, connV2 *admin.APIClient) (tfResult []map[string]any, containersIDs map[string]string, err error) { if len(apiObjects) == 0 { return nil, nil, nil } @@ -544,11 +579,11 @@ func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects } if apiObject.GetProviderName() != "TENANT" { - params := &admin20240530.ListPeeringContainerByCloudProviderApiParams{ + params := &admin.ListPeeringContainerByCloudProviderApiParams{ GroupId: d.Get("project_id").(string), ProviderName: apiObject.ProviderName, } - containers, _, err := connV220240530.NetworkPeeringApi.ListPeeringContainerByCloudProviderWithParams(ctx, params).Execute() + containers, _, err := 
connV2.NetworkPeeringApi.ListPeeringContainerByCloudProviderWithParams(ctx, params).Execute() if err != nil { return nil, nil, err } @@ -561,7 +596,7 @@ func flattenAdvancedReplicationSpecRegionConfigs(ctx context.Context, apiObjects return tfList, containerIDs, nil } -func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin20240530.CloudRegionConfig, tfMapObject map[string]any) map[string]any { +func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin.CloudRegionConfig20240805, tfMapObject map[string]any) map[string]any { if apiObject == nil { return nil } @@ -599,23 +634,25 @@ func flattenAdvancedReplicationSpecRegionConfig(apiObject *admin20240530.CloudRe return tfMap } -func hwSpecToDedicatedHwSpec(apiObject *admin20240530.HardwareSpec) *admin20240530.DedicatedHardwareSpec { +func hwSpecToDedicatedHwSpec(apiObject *admin.HardwareSpec20240805) *admin.DedicatedHardwareSpec20240805 { if apiObject == nil { return nil } - return &admin20240530.DedicatedHardwareSpec{ + return &admin.DedicatedHardwareSpec20240805{ NodeCount: apiObject.NodeCount, DiskIOPS: apiObject.DiskIOPS, EbsVolumeType: apiObject.EbsVolumeType, InstanceSize: apiObject.InstanceSize, + DiskSizeGB: apiObject.DiskSizeGB, } } -func dedicatedHwSpecToHwSpec(apiObject *admin20240530.DedicatedHardwareSpec) *admin20240530.HardwareSpec { +func dedicatedHwSpecToHwSpec(apiObject *admin.DedicatedHardwareSpec20240805) *admin.HardwareSpec20240805 { if apiObject == nil { return nil } - return &admin20240530.HardwareSpec{ + return &admin.HardwareSpec20240805{ + DiskSizeGB: apiObject.DiskSizeGB, NodeCount: apiObject.NodeCount, DiskIOPS: apiObject.DiskIOPS, EbsVolumeType: apiObject.EbsVolumeType, @@ -623,7 +660,7 @@ func dedicatedHwSpecToHwSpec(apiObject *admin20240530.DedicatedHardwareSpec) *ad } } -func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin20240530.DedicatedHardwareSpec, providerName string, tfMapObjects []any) []map[string]any { +func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHardwareSpec20240805, providerName string, tfMapObjects []any) []map[string]any { if apiObject == nil { return nil } @@ -644,6 +681,9 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin20240530.Ded tfMap["ebs_volume_type"] = apiObject.GetEbsVolumeType() } } + if _, ok := tfMapObject["disk_size_gb"]; ok { + tfMap["disk_size_gb"] = apiObject.GetDiskSizeGB() + } if _, ok := tfMapObject["node_count"]; ok { tfMap["node_count"] = apiObject.GetNodeCount() } @@ -652,6 +692,7 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin20240530.Ded tfList = append(tfList, tfMap) } } else { + tfMap["disk_size_gb"] = apiObject.GetDiskSizeGB() tfMap["disk_iops"] = apiObject.GetDiskIOPS() tfMap["ebs_volume_type"] = apiObject.GetEbsVolumeType() tfMap["node_count"] = apiObject.GetNodeCount() @@ -661,7 +702,7 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin20240530.Ded return tfList } -func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin20240530.AdvancedAutoScalingSettings) []map[string]any { +func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin.AdvancedAutoScalingSettings) []map[string]any { if apiObject == nil { return nil } @@ -680,7 +721,7 @@ func flattenAdvancedReplicationSpecAutoScaling(apiObject *admin20240530.Advanced return tfList } -func getAdvancedClusterContainerID(containers []admin20240530.CloudProviderContainer, cluster *admin20240530.CloudRegionConfig) string { +func getAdvancedClusterContainerID(containers 
[]admin.CloudProviderContainer, cluster *admin.CloudRegionConfig20240805) string { if len(containers) == 0 { return "" } @@ -758,16 +799,16 @@ func expandProcessArgs(d *schema.ResourceData, p map[string]any) admin20240530.C return res } -func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240530.ComponentLabel, diag.Diagnostics) { +func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin.ComponentLabel, diag.Diagnostics) { list := d.Get("labels").(*schema.Set) - res := make([]admin20240530.ComponentLabel, list.Len()) + res := make([]admin.ComponentLabel, list.Len()) for i, val := range list.List() { v := val.(map[string]any) key := v["key"].(string) if key == ignoreLabel { return nil, diag.FromErr(fmt.Errorf("you should not set `Infrastructure Tool` label, it is used for internal purposes")) } - res[i] = admin20240530.ComponentLabel{ + res[i] = admin.ComponentLabel{ Key: conversion.StringPtr(key), Value: conversion.StringPtr(v["value"].(string)), } @@ -775,27 +816,60 @@ func expandLabelSliceFromSetSchema(d *schema.ResourceData) ([]admin20240530.Comp return res, nil } -func expandAdvancedReplicationSpecs(tfList []any) *[]admin20240530.ReplicationSpec { - if len(tfList) == 0 { +func expandAdvancedReplicationSpecs(tfList []any, rootDiskSizeGB *float64) *[]admin.ReplicationSpec20240805 { + var apiObjects []admin.ReplicationSpec20240805 + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok || tfMap == nil { + continue + } + apiObject := expandAdvancedReplicationSpec(tfMap, rootDiskSizeGB) + apiObjects = append(apiObjects, *apiObject) + + // handles adding additional replication spec objects if legacy num_shards attribute is being used and greater than 1 + numShards := tfMap["num_shards"].(int) + for range numShards - 1 { + apiObjects = append(apiObjects, *apiObject) + } + } + if apiObjects == nil { return nil } + return &apiObjects +} + +func expandAdvancedReplicationSpecsOldSDK(tfList []any) *[]admin20240530.ReplicationSpec { var apiObjects []admin20240530.ReplicationSpec for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { continue } - apiObject := expandAdvancedReplicationSpec(tfMap) + apiObject := expandAdvancedReplicationSpecOldSDK(tfMap) apiObjects = append(apiObjects, *apiObject) } + if apiObjects == nil { + return nil + } return &apiObjects } -func expandAdvancedReplicationSpec(tfMap map[string]any) *admin20240530.ReplicationSpec { +func expandAdvancedReplicationSpec(tfMap map[string]any, rootDiskSizeGB *float64) *admin.ReplicationSpec20240805 { + apiObject := &admin.ReplicationSpec20240805{ + ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), + RegionConfigs: expandRegionConfigs(tfMap["region_configs"].([]any), rootDiskSizeGB), + } + if tfMap["external_id"].(string) != "" { + apiObject.Id = conversion.StringPtr(tfMap["external_id"].(string)) + } + return apiObject +} + +func expandAdvancedReplicationSpecOldSDK(tfMap map[string]any) *admin20240530.ReplicationSpec { apiObject := &admin20240530.ReplicationSpec{ NumShards: conversion.Pointer(tfMap["num_shards"].(int)), ZoneName: conversion.StringPtr(tfMap["zone_name"].(string)), - RegionConfigs: expandRegionConfigs(tfMap["region_configs"].([]any)), + RegionConfigs: convertRegionConfigSliceToOldSDK(expandRegionConfigs(tfMap["region_configs"].([]any), nil)), } if tfMap["id"].(string) != "" { apiObject.Id = conversion.StringPtr(tfMap["id"].(string)) @@ -803,39 +877,38 @@ func expandAdvancedReplicationSpec(tfMap map[string]any) 
*admin20240530.Replicat return apiObject } -func expandRegionConfigs(tfList []any) *[]admin20240530.CloudRegionConfig { - if len(tfList) == 0 { - return nil - } - var apiObjects []admin20240530.CloudRegionConfig +func expandRegionConfigs(tfList []any, rootDiskSizeGB *float64) *[]admin.CloudRegionConfig20240805 { + var apiObjects []admin.CloudRegionConfig20240805 for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) if !ok || tfMap == nil { continue } - apiObject := expandRegionConfig(tfMap) + apiObject := expandRegionConfig(tfMap, rootDiskSizeGB) apiObjects = append(apiObjects, *apiObject) } - + if apiObjects == nil { + return nil + } return &apiObjects } -func expandRegionConfig(tfMap map[string]any) *admin20240530.CloudRegionConfig { +func expandRegionConfig(tfMap map[string]any, rootDiskSizeGB *float64) *admin.CloudRegionConfig20240805 { providerName := tfMap["provider_name"].(string) - apiObject := &admin20240530.CloudRegionConfig{ + apiObject := &admin.CloudRegionConfig20240805{ Priority: conversion.Pointer(cast.ToInt(tfMap["priority"])), ProviderName: conversion.StringPtr(providerName), RegionName: conversion.StringPtr(tfMap["region_name"].(string)), } if v, ok := tfMap["analytics_specs"]; ok && len(v.([]any)) > 0 { - apiObject.AnalyticsSpecs = expandRegionConfigSpec(v.([]any), providerName) + apiObject.AnalyticsSpecs = expandRegionConfigSpec(v.([]any), providerName, rootDiskSizeGB) } if v, ok := tfMap["electable_specs"]; ok && len(v.([]any)) > 0 { - apiObject.ElectableSpecs = dedicatedHwSpecToHwSpec(expandRegionConfigSpec(v.([]any), providerName)) + apiObject.ElectableSpecs = dedicatedHwSpecToHwSpec(expandRegionConfigSpec(v.([]any), providerName, rootDiskSizeGB)) } if v, ok := tfMap["read_only_specs"]; ok && len(v.([]any)) > 0 { - apiObject.ReadOnlySpecs = expandRegionConfigSpec(v.([]any), providerName) + apiObject.ReadOnlySpecs = expandRegionConfigSpec(v.([]any), providerName, rootDiskSizeGB) } if v, ok := tfMap["auto_scaling"]; ok && len(v.([]any)) > 0 { apiObject.AutoScaling = expandRegionConfigAutoScaling(v.([]any)) @@ -849,9 +922,9 @@ func expandRegionConfig(tfMap map[string]any) *admin20240530.CloudRegionConfig { return apiObject } -func expandRegionConfigSpec(tfList []any, providerName string) *admin20240530.DedicatedHardwareSpec { +func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *float64) *admin.DedicatedHardwareSpec20240805 { tfMap, _ := tfList[0].(map[string]any) - apiObject := new(admin20240530.DedicatedHardwareSpec) + apiObject := new(admin.DedicatedHardwareSpec20240805) if providerName == constant.AWS || providerName == constant.AZURE { if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { apiObject.DiskIOPS = conversion.Pointer(v.(int)) @@ -868,14 +941,24 @@ func expandRegionConfigSpec(tfList []any, providerName string) *admin20240530.De if v, ok := tfMap["node_count"]; ok { apiObject.NodeCount = conversion.Pointer(v.(int)) } + + if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 { + apiObject.DiskSizeGB = conversion.Pointer(v.(float64)) + } + + // value defined in root is set if it is defined in the create, or value has changed in the update. 
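+ // For example (illustrative values): with disk_size_gb = 50 set inside this spec block and a
+ // root-level disk_size_gb of 60 passed in as rootDiskSizeGB, the request is sent with
+ // DiskSizeGB = 60; when rootDiskSizeGB is nil, the value from the spec block is kept.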
+ if rootDiskSizeGB != nil { + apiObject.DiskSizeGB = rootDiskSizeGB + } + return apiObject } -func expandRegionConfigAutoScaling(tfList []any) *admin20240530.AdvancedAutoScalingSettings { +func expandRegionConfigAutoScaling(tfList []any) *admin.AdvancedAutoScalingSettings { tfMap, _ := tfList[0].(map[string]any) - settings := admin20240530.AdvancedAutoScalingSettings{ - DiskGB: new(admin20240530.DiskGBAutoScaling), - Compute: new(admin20240530.AdvancedComputeAutoScaling), + settings := admin.AdvancedAutoScalingSettings{ + DiskGB: new(admin.DiskGBAutoScaling), + Compute: new(admin.AdvancedComputeAutoScaling), } if v, ok := tfMap["disk_gb_enabled"]; ok { @@ -901,3 +984,89 @@ func expandRegionConfigAutoScaling(tfList []any) *admin20240530.AdvancedAutoScal } return &settings } + +func flattenAdvancedReplicationSpecsDS(ctx context.Context, apiRepSpecs []admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, d *schema.ResourceData, connV2 *admin.APIClient) ([]map[string]any, error) { + if len(apiRepSpecs) == 0 { + return nil, nil + } + + tfList := make([]map[string]any, len(apiRepSpecs)) + + for i, apiRepSpec := range apiRepSpecs { + tfReplicationSpec, err := flattenAdvancedReplicationSpec(ctx, &apiRepSpec, zoneNameToOldReplicationSpecIDs, nil, d, connV2) + if err != nil { + return nil, err + } + tfList[i] = tfReplicationSpec + } + return tfList, nil +} + +func flattenAdvancedReplicationSpec(ctx context.Context, apiObject *admin.ReplicationSpec20240805, zoneNameToOldReplicationSpecIDs map[string]string, tfMapObject map[string]any, + d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { + if apiObject == nil { + return nil, nil + } + + tfMap := map[string]any{} + tfMap["external_id"] = apiObject.GetId() + + if oldID, ok := zoneNameToOldReplicationSpecIDs[apiObject.GetZoneName()]; ok { + tfMap["id"] = oldID // replicationSpecs.*.id stores value associated to old cluster API (2023-02-01) + } + + // define num_shards for backwards compatibility as this attribute has default value of 1. 
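+ // Under the new sharding schema each replication_specs element describes a single shard, so the
+ // deprecated num_shards attribute is always reported here with its default value of 1.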
+ tfMap["num_shards"] = 1 + + if tfMapObject != nil { + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), tfMapObject["region_configs"].([]any), d, connV2) + if err != nil { + return nil, err + } + tfMap["region_configs"] = object + tfMap["container_id"] = containerIDs + } else { + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, apiObject.GetRegionConfigs(), nil, d, connV2) + if err != nil { + return nil, err + } + tfMap["region_configs"] = object + tfMap["container_id"] = containerIDs + } + tfMap["zone_name"] = apiObject.GetZoneName() + tfMap["zone_id"] = apiObject.GetZoneId() + + return tfMap, nil +} + +func flattenAdvancedReplicationSpecOldSDK(ctx context.Context, apiObject *admin20240530.ReplicationSpec, zoneNameToZoneIDs map[string]string, rootDiskSizeGB float64, tfMapObject map[string]any, + d *schema.ResourceData, connV2 *admin.APIClient) (map[string]any, error) { + if apiObject == nil { + return nil, nil + } + + tfMap := map[string]any{} + tfMap["num_shards"] = apiObject.GetNumShards() + tfMap["id"] = apiObject.GetId() + if tfMapObject != nil { + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, *convertRegionConfigSliceToLatest(apiObject.RegionConfigs, rootDiskSizeGB), tfMapObject["region_configs"].([]any), d, connV2) + if err != nil { + return nil, err + } + tfMap["region_configs"] = object + tfMap["container_id"] = containerIDs + } else { + object, containerIDs, err := flattenAdvancedReplicationSpecRegionConfigs(ctx, *convertRegionConfigSliceToLatest(apiObject.RegionConfigs, rootDiskSizeGB), nil, d, connV2) + if err != nil { + return nil, err + } + tfMap["region_configs"] = object + tfMap["container_id"] = containerIDs + } + tfMap["zone_name"] = apiObject.GetZoneName() + if zoneID, ok := zoneNameToZoneIDs[apiObject.GetZoneName()]; ok { // zone id is not present on old API SDK, so we fetch values from new API and map them using zone name + tfMap["zone_id"] = zoneID + } + + return tfMap, nil +} diff --git a/internal/service/advancedcluster/model_advanced_cluster_test.go b/internal/service/advancedcluster/model_advanced_cluster_test.go index 5e9a5726fb..7d39eb45bb 100644 --- a/internal/service/advancedcluster/model_advanced_cluster_test.go +++ b/internal/service/advancedcluster/model_advanced_cluster_test.go @@ -3,24 +3,29 @@ package advancedcluster_test import ( "context" "errors" + "fmt" "net/http" "testing" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "go.mongodb.org/atlas-sdk/v20240805003/mockadmin" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - mockadmin20240530 "go.mongodb.org/atlas-sdk/v20240530005/mockadmin" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" ) var ( dummyClusterName = "clusterName" dummyProjectID = "projectId" errGeneric = errors.New("generic") - advancedClusters = []admin20240530.AdvancedClusterDescription{{StateName: conversion.StringPtr("NOT IDLE")}} + advancedClusters = 
[]admin.ClusterDescription20240805{{StateName: conversion.StringPtr("NOT IDLE")}} ) func TestFlattenReplicationSpecs(t *testing.T) { @@ -122,18 +127,18 @@ func TestFlattenReplicationSpecs(t *testing.T) { } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - peeringAPI := mockadmin20240530.NetworkPeeringApi{} + peeringAPI := mockadmin.NetworkPeeringApi{} - peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, mock.Anything).Return(admin20240530.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) - containerResult := []admin20240530.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} - peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin20240530.PaginatedCloudProviderContainer{Results: &containerResult}, nil, nil) + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderWithParams(mock.Anything, mock.Anything).Return(admin.ListPeeringContainerByCloudProviderApiRequest{ApiService: &peeringAPI}) + containerResult := []admin.CloudProviderContainer{{Id: conversion.StringPtr("c1"), RegionName: ®ionName, ProviderName: &providerName}} + peeringAPI.EXPECT().ListPeeringContainerByCloudProviderExecute(mock.Anything).Return(&admin.PaginatedCloudProviderContainer{Results: &containerResult}, nil, nil) - client := &admin20240530.APIClient{ + client := &admin.APIClient{ NetworkPeeringApi: &peeringAPI, } resourceData := schema.TestResourceDataRaw(t, testSchema, map[string]any{"project_id": "p1"}) - tfOutputSpecs, err := advancedcluster.FlattenAdvancedReplicationSpecs(context.Background(), tc.adminSpecs, tc.tfInputSpecs, resourceData, client) + tfOutputSpecs, err := advancedcluster.FlattenAdvancedReplicationSpecsOldSDK(context.Background(), tc.adminSpecs, nil, 0, tc.tfInputSpecs, resourceData, client) require.NoError(t, err) assert.Len(t, tfOutputSpecs, tc.expectedLen) @@ -145,6 +150,46 @@ func TestFlattenReplicationSpecs(t *testing.T) { } } +func TestGetDiskSizeGBFromReplicationSpec(t *testing.T) { + diskSizeGBValue := 40.0 + + testCases := map[string]struct { + clusterDescription admin.ClusterDescription20240805 + expectedDiskSizeResult float64 + }{ + "cluster description with disk size gb value at electable spec": { + clusterDescription: admin.ClusterDescription20240805{ + ReplicationSpecs: &[]admin.ReplicationSpec20240805{{ + RegionConfigs: &[]admin.CloudRegionConfig20240805{{ + ElectableSpecs: &admin.HardwareSpec20240805{ + DiskSizeGB: admin.PtrFloat64(diskSizeGBValue), + }, + }}, + }}, + }, + expectedDiskSizeResult: diskSizeGBValue, + }, + "cluster description with no electable spec": { + clusterDescription: admin.ClusterDescription20240805{ + ReplicationSpecs: &[]admin.ReplicationSpec20240805{ + {RegionConfigs: &[]admin.CloudRegionConfig20240805{{}}}, + }, + }, + expectedDiskSizeResult: 0, + }, + "cluster description with no replication spec": { + clusterDescription: admin.ClusterDescription20240805{}, + expectedDiskSizeResult: 0, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := advancedcluster.GetDiskSizeGBFromReplicationSpec(&tc.clusterDescription) + assert.Equal(t, fmt.Sprintf("%.f", tc.expectedDiskSizeResult), fmt.Sprintf("%.f", result)) // formatting to string to avoid float comparison + }) + } +} + type Result struct { response any error error @@ -153,7 +198,7 @@ type Result struct { func TestUpgradeRefreshFunc(t *testing.T) { testCases := []struct { - mockCluster 
*admin20240530.AdvancedClusterDescription + mockCluster *admin.ClusterDescription20240805 mockResponse *http.Response expectedResult Result mockError error @@ -215,11 +260,11 @@ func TestUpgradeRefreshFunc(t *testing.T) { }, { name: "Successful", - mockCluster: &admin20240530.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")}, + mockCluster: &admin.ClusterDescription20240805{StateName: conversion.StringPtr("stateName")}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ - response: &admin20240530.AdvancedClusterDescription{StateName: conversion.StringPtr("stateName")}, + response: &admin.ClusterDescription20240805{StateName: conversion.StringPtr("stateName")}, state: "stateName", error: nil, }, @@ -228,9 +273,9 @@ func TestUpgradeRefreshFunc(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testObject := mockadmin20240530.NewClustersApi(t) + testObject := mockadmin.NewClustersApi(t) - testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin20240530.GetClusterApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().GetCluster(mock.Anything, mock.Anything, mock.Anything).Return(admin.GetClusterApiRequest{ApiService: testObject}).Once() testObject.EXPECT().GetClusterExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() result, stateName, err := advancedcluster.UpgradeRefreshFunc(context.Background(), dummyClusterName, dummyProjectID, testObject)() @@ -247,7 +292,7 @@ func TestUpgradeRefreshFunc(t *testing.T) { func TestResourceListAdvancedRefreshFunc(t *testing.T) { testCases := []struct { - mockCluster *admin20240530.PaginatedAdvancedClusterDescription + mockCluster *admin.PaginatedClusterDescription20240805 mockResponse *http.Response expectedResult Result mockError error @@ -309,7 +354,7 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { }, { name: "Successful but with at least one cluster not idle", - mockCluster: &admin20240530.PaginatedAdvancedClusterDescription{Results: &advancedClusters}, + mockCluster: &admin.PaginatedClusterDescription20240805{Results: &advancedClusters}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ @@ -320,11 +365,11 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { }, { name: "Successful", - mockCluster: &admin20240530.PaginatedAdvancedClusterDescription{}, + mockCluster: &admin.PaginatedClusterDescription20240805{}, mockResponse: &http.Response{StatusCode: 200}, expectedError: false, expectedResult: Result{ - response: &admin20240530.PaginatedAdvancedClusterDescription{}, + response: &admin.PaginatedClusterDescription20240805{}, state: "IDLE", error: nil, }, @@ -333,9 +378,9 @@ func TestResourceListAdvancedRefreshFunc(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testObject := mockadmin20240530.NewClustersApi(t) + testObject := mockadmin.NewClustersApi(t) - testObject.EXPECT().ListClusters(mock.Anything, mock.Anything).Return(admin20240530.ListClustersApiRequest{ApiService: testObject}).Once() + testObject.EXPECT().ListClusters(mock.Anything, mock.Anything).Return(admin.ListClustersApiRequest{ApiService: testObject}).Once() testObject.EXPECT().ListClustersExecute(mock.Anything).Return(tc.mockCluster, tc.mockResponse, tc.mockError).Once() result, stateName, err := advancedcluster.ResourceClusterListAdvancedRefreshFunc(context.Background(), dummyProjectID, testObject)() diff --git 
a/internal/service/advancedcluster/model_sdk_version_conversion.go b/internal/service/advancedcluster/model_sdk_version_conversion.go new file mode 100644 index 0000000000..1a85c53309 --- /dev/null +++ b/internal/service/advancedcluster/model_sdk_version_conversion.go @@ -0,0 +1,326 @@ +package advancedcluster + +import ( + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" +) + +// Conversions from one SDK model version to another are used to avoid duplicating our flatten/expand conversion functions. +// - These functions must not contain any business logic. +// - All will be removed once we rely on a single API version. + +func convertTagsPtrToLatest(tags *[]admin20240530.ResourceTag) *[]admin.ResourceTag { + if tags == nil { + return nil + } + result := convertTagsToLatest(*tags) + return &result +} + +func convertTagsPtrToOldSDK(tags *[]admin.ResourceTag) *[]admin20240530.ResourceTag { + if tags == nil { + return nil + } + tagsSlice := *tags + results := make([]admin20240530.ResourceTag, len(tagsSlice)) + for i := range len(tagsSlice) { + tag := tagsSlice[i] + results[i] = admin20240530.ResourceTag{ + Key: tag.Key, + Value: tag.Value, + } + } + return &results +} + +func convertTagsToLatest(tags []admin20240530.ResourceTag) []admin.ResourceTag { + results := make([]admin.ResourceTag, len(tags)) + for i := range len(tags) { + tag := tags[i] + results[i] = admin.ResourceTag{ + Key: tag.Key, + Value: tag.Value, + } + } + return results +} + +func convertBiConnectToOldSDK(biconnector *admin.BiConnector) *admin20240530.BiConnector { + if biconnector == nil { + return nil + } + return &admin20240530.BiConnector{ + Enabled: biconnector.Enabled, + ReadPreference: biconnector.ReadPreference, + } +} + +func convertBiConnectToLatest(biconnector *admin20240530.BiConnector) *admin.BiConnector { + return &admin.BiConnector{ + Enabled: biconnector.Enabled, + ReadPreference: biconnector.ReadPreference, + } +} + +func convertConnectionStringToLatest(connStrings *admin20240530.ClusterConnectionStrings) *admin.ClusterConnectionStrings { + return &admin.ClusterConnectionStrings{ + AwsPrivateLink: connStrings.AwsPrivateLink, + AwsPrivateLinkSrv: connStrings.AwsPrivateLinkSrv, + Private: connStrings.Private, + PrivateEndpoint: convertPrivateEndpointToLatest(connStrings.PrivateEndpoint), + PrivateSrv: connStrings.PrivateSrv, + Standard: connStrings.Standard, + StandardSrv: connStrings.StandardSrv, + } +} + +func convertPrivateEndpointToLatest(privateEndpoints *[]admin20240530.ClusterDescriptionConnectionStringsPrivateEndpoint) *[]admin.ClusterDescriptionConnectionStringsPrivateEndpoint { + if privateEndpoints == nil { + return nil + } + peSlice := *privateEndpoints + results := make([]admin.ClusterDescriptionConnectionStringsPrivateEndpoint, len(peSlice)) + for i := range len(peSlice) { + pe := peSlice[i] + results[i] = admin.ClusterDescriptionConnectionStringsPrivateEndpoint{ + ConnectionString: pe.ConnectionString, + Endpoints: convertEndpointsToLatest(pe.Endpoints), + SrvConnectionString: pe.SrvConnectionString, + SrvShardOptimizedConnectionString: pe.SrvShardOptimizedConnectionString, + Type: pe.Type, + } + } + return &results +} + +func convertEndpointsToLatest(privateEndpoints *[]admin20240530.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint) *[]admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint { + if privateEndpoints == nil { + return nil + } + peSlice := 
*privateEndpoints + results := make([]admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint, len(peSlice)) + for i := range len(peSlice) { + pe := peSlice[i] + results[i] = admin.ClusterDescriptionConnectionStringsPrivateEndpointEndpoint{ + EndpointId: pe.EndpointId, + ProviderName: pe.ProviderName, + Region: pe.Region, + } + } + return &results +} + +func convertLabelsToLatest(labels *[]admin20240530.ComponentLabel) *[]admin.ComponentLabel { + labelSlice := *labels + results := make([]admin.ComponentLabel, len(labelSlice)) + for i := range len(labelSlice) { + label := labelSlice[i] + results[i] = admin.ComponentLabel{ + Key: label.Key, + Value: label.Value, + } + } + return &results +} + +func convertLabelSliceToOldSDK(slice []admin.ComponentLabel, err diag.Diagnostics) ([]admin20240530.ComponentLabel, diag.Diagnostics) { + if err != nil { + return nil, err + } + results := make([]admin20240530.ComponentLabel, len(slice)) + for i := range len(slice) { + label := slice[i] + results[i] = admin20240530.ComponentLabel{ + Key: label.Key, + Value: label.Value, + } + } + return results, nil +} + +func convertRegionConfigSliceToOldSDK(slice *[]admin.CloudRegionConfig20240805) *[]admin20240530.CloudRegionConfig { + if slice == nil { + return nil + } + cloudRegionSlice := *slice + results := make([]admin20240530.CloudRegionConfig, len(cloudRegionSlice)) + for i := range len(cloudRegionSlice) { + cloudRegion := cloudRegionSlice[i] + results[i] = admin20240530.CloudRegionConfig{ + ElectableSpecs: convertHardwareSpecToOldSDK(cloudRegion.ElectableSpecs), + Priority: cloudRegion.Priority, + ProviderName: cloudRegion.ProviderName, + RegionName: cloudRegion.RegionName, + AnalyticsAutoScaling: convertAdvancedAutoScalingSettingsToOldSDK(cloudRegion.AnalyticsAutoScaling), + AnalyticsSpecs: convertDedicatedHardwareSpecToOldSDK(cloudRegion.AnalyticsSpecs), + AutoScaling: convertAdvancedAutoScalingSettingsToOldSDK(cloudRegion.AutoScaling), + ReadOnlySpecs: convertDedicatedHardwareSpecToOldSDK(cloudRegion.ReadOnlySpecs), + BackingProviderName: cloudRegion.BackingProviderName, + } + } + return &results +} + +func convertHardwareSpecToOldSDK(hwspec *admin.HardwareSpec20240805) *admin20240530.HardwareSpec { + if hwspec == nil { + return nil + } + return &admin20240530.HardwareSpec{ + DiskIOPS: hwspec.DiskIOPS, + EbsVolumeType: hwspec.EbsVolumeType, + InstanceSize: hwspec.InstanceSize, + NodeCount: hwspec.NodeCount, + } +} + +func convertAdvancedAutoScalingSettingsToOldSDK(settings *admin.AdvancedAutoScalingSettings) *admin20240530.AdvancedAutoScalingSettings { + if settings == nil { + return nil + } + return &admin20240530.AdvancedAutoScalingSettings{ + Compute: convertAdvancedComputeAutoScalingToOldSDK(settings.Compute), + DiskGB: convertDiskGBAutoScalingToOldSDK(settings.DiskGB), + } +} + +func convertAdvancedComputeAutoScalingToOldSDK(settings *admin.AdvancedComputeAutoScaling) *admin20240530.AdvancedComputeAutoScaling { + if settings == nil { + return nil + } + return &admin20240530.AdvancedComputeAutoScaling{ + Enabled: settings.Enabled, + MaxInstanceSize: settings.MaxInstanceSize, + MinInstanceSize: settings.MinInstanceSize, + ScaleDownEnabled: settings.ScaleDownEnabled, + } +} + +func convertDiskGBAutoScalingToOldSDK(settings *admin.DiskGBAutoScaling) *admin20240530.DiskGBAutoScaling { + if settings == nil { + return nil + } + return &admin20240530.DiskGBAutoScaling{ + Enabled: settings.Enabled, + } +} + +func convertDedicatedHardwareSpecToOldSDK(spec *admin.DedicatedHardwareSpec20240805) 
*admin20240530.DedicatedHardwareSpec { + if spec == nil { + return nil + } + return &admin20240530.DedicatedHardwareSpec{ + NodeCount: spec.NodeCount, + DiskIOPS: spec.DiskIOPS, + EbsVolumeType: spec.EbsVolumeType, + InstanceSize: spec.InstanceSize, + } +} + +func convertDedicatedHwSpecToLatest(spec *admin20240530.DedicatedHardwareSpec, rootDiskSizeGB float64) *admin.DedicatedHardwareSpec20240805 { + if spec == nil { + return nil + } + return &admin.DedicatedHardwareSpec20240805{ + NodeCount: spec.NodeCount, + DiskIOPS: spec.DiskIOPS, + EbsVolumeType: spec.EbsVolumeType, + InstanceSize: spec.InstanceSize, + DiskSizeGB: &rootDiskSizeGB, + } +} + +func convertAdvancedAutoScalingSettingsToLatest(settings *admin20240530.AdvancedAutoScalingSettings) *admin.AdvancedAutoScalingSettings { + if settings == nil { + return nil + } + return &admin.AdvancedAutoScalingSettings{ + Compute: convertAdvancedComputeAutoScalingToLatest(settings.Compute), + DiskGB: convertDiskGBAutoScalingToLatest(settings.DiskGB), + } +} + +func convertAdvancedComputeAutoScalingToLatest(settings *admin20240530.AdvancedComputeAutoScaling) *admin.AdvancedComputeAutoScaling { + if settings == nil { + return nil + } + return &admin.AdvancedComputeAutoScaling{ + Enabled: settings.Enabled, + MaxInstanceSize: settings.MaxInstanceSize, + MinInstanceSize: settings.MinInstanceSize, + ScaleDownEnabled: settings.ScaleDownEnabled, + } +} + +func convertDiskGBAutoScalingToLatest(settings *admin20240530.DiskGBAutoScaling) *admin.DiskGBAutoScaling { + if settings == nil { + return nil + } + return &admin.DiskGBAutoScaling{ + Enabled: settings.Enabled, + } +} + +func convertHardwareSpecToLatest(hwspec *admin20240530.HardwareSpec, rootDiskSizeGB float64) *admin.HardwareSpec20240805 { + if hwspec == nil { + return nil + } + return &admin.HardwareSpec20240805{ + DiskIOPS: hwspec.DiskIOPS, + EbsVolumeType: hwspec.EbsVolumeType, + InstanceSize: hwspec.InstanceSize, + NodeCount: hwspec.NodeCount, + DiskSizeGB: &rootDiskSizeGB, + } +} + +func convertRegionConfigSliceToLatest(slice *[]admin20240530.CloudRegionConfig, rootDiskSizeGB float64) *[]admin.CloudRegionConfig20240805 { + if slice == nil { + return nil + } + cloudRegionSlice := *slice + results := make([]admin.CloudRegionConfig20240805, len(cloudRegionSlice)) + for i := range len(cloudRegionSlice) { + cloudRegion := cloudRegionSlice[i] + results[i] = admin.CloudRegionConfig20240805{ + ElectableSpecs: convertHardwareSpecToLatest(cloudRegion.ElectableSpecs, rootDiskSizeGB), + Priority: cloudRegion.Priority, + ProviderName: cloudRegion.ProviderName, + RegionName: cloudRegion.RegionName, + AnalyticsAutoScaling: convertAdvancedAutoScalingSettingsToLatest(cloudRegion.AnalyticsAutoScaling), + AnalyticsSpecs: convertDedicatedHwSpecToLatest(cloudRegion.AnalyticsSpecs, rootDiskSizeGB), + AutoScaling: convertAdvancedAutoScalingSettingsToLatest(cloudRegion.AutoScaling), + ReadOnlySpecs: convertDedicatedHwSpecToLatest(cloudRegion.ReadOnlySpecs, rootDiskSizeGB), + BackingProviderName: cloudRegion.BackingProviderName, + } + } + return &results +} + +func convertClusterDescToLatestExcludeRepSpecs(oldClusterDesc *admin20240530.AdvancedClusterDescription) *admin.ClusterDescription20240805 { + return &admin.ClusterDescription20240805{ + BackupEnabled: oldClusterDesc.BackupEnabled, + AcceptDataRisksAndForceReplicaSetReconfig: oldClusterDesc.AcceptDataRisksAndForceReplicaSetReconfig, + ClusterType: oldClusterDesc.ClusterType, + CreateDate: oldClusterDesc.CreateDate, + DiskWarmingMode: 
oldClusterDesc.DiskWarmingMode, + EncryptionAtRestProvider: oldClusterDesc.EncryptionAtRestProvider, + GlobalClusterSelfManagedSharding: oldClusterDesc.GlobalClusterSelfManagedSharding, + GroupId: oldClusterDesc.GroupId, + Id: oldClusterDesc.Id, + MongoDBMajorVersion: oldClusterDesc.MongoDBMajorVersion, + MongoDBVersion: oldClusterDesc.MongoDBVersion, + Name: oldClusterDesc.Name, + Paused: oldClusterDesc.Paused, + PitEnabled: oldClusterDesc.PitEnabled, + RootCertType: oldClusterDesc.RootCertType, + StateName: oldClusterDesc.StateName, + TerminationProtectionEnabled: oldClusterDesc.TerminationProtectionEnabled, + VersionReleaseSystem: oldClusterDesc.VersionReleaseSystem, + Tags: convertTagsPtrToLatest(oldClusterDesc.Tags), + BiConnector: convertBiConnectToLatest(oldClusterDesc.BiConnector), + ConnectionStrings: convertConnectionStringToLatest(oldClusterDesc.ConnectionStrings), + Labels: convertLabelsToLatest(oldClusterDesc.Labels), + } +} diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 5257ff1124..31d71545ad 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -12,16 +12,19 @@ import ( "strings" "time" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/spf13/cast" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "github.com/spf13/cast" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) const ( @@ -35,12 +38,12 @@ const ( ErrorAdvancedConfRead = "error reading Advanced Configuration Option form MongoDB Cluster (%s): %s" ErrorClusterAdvancedSetting = "error setting `%s` for MongoDB ClusterAdvanced (%s): %s" ErrorAdvancedClusterListStatus = "error awaiting MongoDB ClusterAdvanced List IDLE: %s" + ErrorOperationNotPermitted = "error operation not permitted" ignoreLabel = "Infrastructure Tool" + DeprecationOldSchemaAction = "Please refer to our examples, documentation, and 1.18.0 migration guide for more details at https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide.html.markdown" ) -type acCtxKey string - -var upgradeRequestCtxKey acCtxKey = "upgradeRequest" +var DeprecationMsgOldSchema = fmt.Sprintf("%s %s", constant.DeprecationParam, DeprecationOldSchemaAction) func Resource() *schema.Resource { return &schema.Resource{ @@ -109,9 +112,10 @@ func Resource() *schema.Resource { Computed: true, }, "disk_size_gb": { - Type: schema.TypeFloat, - Optional: true, - Computed: true, + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Deprecated: DeprecationMsgOldSchema, }, "encryption_at_rest_provider": { Type: schema.TypeString, @@ -122,7 +126,7 @@ func Resource() *schema.Resource { Type: schema.TypeSet, Optional: true, Set: HashFunctionForKeyValuePair, - Deprecated: fmt.Sprintf(constant.DeprecationParamByDateWithReplacement, "September 2024", "tags"), + Deprecated: 
fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -168,6 +172,15 @@ func Resource() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { + Type: schema.TypeString, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, + "zone_id": { + Type: schema.TypeString, + Computed: true, + }, + "external_id": { Type: schema.TypeString, Computed: true, }, @@ -176,6 +189,7 @@ func Resource() *schema.Resource { Optional: true, Default: 1, ValidateFunc: validation.IntBetween(1, 50), + Deprecated: DeprecationMsgOldSchema, }, "region_configs": { Type: schema.TypeList, @@ -336,8 +350,14 @@ func schemaSpecs() *schema.Schema { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "disk_size_gb": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + }, "disk_iops": { Type: schema.TypeInt, Optional: true, @@ -346,6 +366,7 @@ func schemaSpecs() *schema.Schema { "ebs_volume_type": { Type: schema.TypeString, Optional: true, + Computed: true, }, "instance_size": { Type: schema.TypeString, @@ -367,12 +388,18 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } } connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) - params := &admin20240530.AdvancedClusterDescription{ + var rootDiskSizeGB *float64 + if v, ok := d.GetOk("disk_size_gb"); ok { + rootDiskSizeGB = conversion.Pointer(v.(float64)) + } + + params := &admin.ClusterDescription20240805{ Name: conversion.StringPtr(cast.ToString(d.Get("name"))), ClusterType: conversion.StringPtr(cast.ToString(d.Get("cluster_type"))), - ReplicationSpecs: expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any)), + ReplicationSpecs: expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any), rootDiskSizeGB), } if v, ok := d.GetOk("backup_enabled"); ok { @@ -381,9 +408,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if _, ok := d.GetOk("bi_connector_config"); ok { params.BiConnector = expandBiConnectorConfig(d) } - if v, ok := d.GetOk("disk_size_gb"); ok { - params.DiskSizeGB = conversion.Pointer(v.(float64)) - } + if v, ok := d.GetOk("encryption_at_rest_provider"); ok { params.EncryptionAtRestProvider = conversion.StringPtr(v.(string)) } @@ -397,7 +422,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if _, ok := d.GetOk("tags"); ok { - params.Tags = conversion.ExpandTagsFromSetSchemaOldSDK(d) + params.Tags = conversion.ExpandTagsFromSetSchema(d) } if v, ok := d.GetOk("mongo_db_major_version"); ok { params.MongoDBMajorVersion = conversion.StringPtr(FormatMongoDBMajorVersion(v.(string))) @@ -425,13 +450,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} } - cluster, _, err := connV220240530.ClustersApi.CreateCluster(ctx, projectID, params).Execute() + cluster, _, err := connV2.ClustersApi.CreateCluster(ctx, projectID, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) } timeout := d.Timeout(schema.TimeoutCreate) - stateConf := CreateStateChangeConfig(ctx, connV220240530, projectID, d.Get("name").(string), timeout) + stateConf := CreateStateChangeConfig(ctx, connV2, projectID, d.Get("name").(string), timeout) _, err = stateConf.WaitForStateContext(ctx) if err != nil { return diag.FromErr(fmt.Errorf(errorCreate, err)) @@ -448,11 +473,13 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. } if v := d.Get("paused").(bool); v { - request := &admin20240530.AdvancedClusterDescription{ + request := &admin.ClusterDescription20240805{ Paused: conversion.Pointer(v), } - _, _, err = updateAdvancedCluster(ctx, connV220240530, request, projectID, d.Get("name").(string), timeout) - if err != nil { + if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, d.Get("name").(string), request).Execute(); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, d.Get("name").(string), err)) + } + if err = waitForUpdateToFinish(ctx, connV2, projectID, d.Get("name").(string), timeout); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, d.Get("name").(string), err)) } } @@ -466,11 +493,11 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. return resourceRead(ctx, d, meta) } -func CreateStateChangeConfig(ctx context.Context, connV220240530 *admin20240530.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { +func CreateStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { return retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"}, Target: []string{"IDLE"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), Timeout: timeout, MinTimeout: 1 * time.Minute, Delay: 3 * time.Minute, @@ -479,28 +506,139 @@ func CreateStateChangeConfig(ctx context.Context, connV220240530 *admin20240530. 
func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + var clusterResp *admin.ClusterDescription20240805 + + var replicationSpecs []map[string]any + if isUsingOldAPISchemaStructure(d) { + clusterOldSDK, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + } + + if err := d.Set("disk_size_gb", clusterOldSDK.GetDiskSizeGB()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) + } + + zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(ctx, projectID, clusterName, connV2) + if err != nil { + return diag.FromErr(err) + } + + replicationSpecs, err = FlattenAdvancedReplicationSpecsOldSDK(ctx, clusterOldSDK.GetReplicationSpecs(), zoneNameToZoneIDs, clusterOldSDK.GetDiskSizeGB(), d.Get("replication_specs").([]any), d, connV2) + if err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) + } + + clusterResp = convertClusterDescToLatestExcludeRepSpecs(clusterOldSDK) + } else { + cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + } + + // root disk_size_gb defined for backwards compatibility avoiding breaking changes + if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(cluster)); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) + } + + zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) + if err != nil { + return diag.FromErr(err) + } + + replicationSpecs, err = flattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), zoneNameToOldReplicationSpecIDs, d.Get("replication_specs").([]any), d, connV2) + if err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) + } + + clusterResp = cluster + } + + diags := setRootFields(d, clusterResp, true) + if diags.HasError() { + return diags + } + + if err := d.Set("replication_specs", replicationSpecs); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) + } + + processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil + return diag.FromErr(fmt.Errorf(errorConfigRead, clusterName, err)) + } + + if err := d.Set("advanced_configuration", flattenProcessArgs(processArgs)); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "advanced_configuration", clusterName, err)) + } + + return nil +} + +// getReplicationSpecIDsFromOldAPI returns the id values of replication specs coming from old API. 
This is used to populate old replication_specs.*.id attribute avoiding breaking changes. +// In the old API each replications spec has a 1:1 relation with each zone, so ids are returned in a map from zoneName to id. +func getReplicationSpecIDsFromOldAPI(ctx context.Context, projectID, clusterName string, connV220240530 *admin20240530.APIClient) (map[string]string, error) { + clusterOldAPI, _, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if apiError, ok := admin20240530.AsError(err); ok { + if apiError.GetErrorCode() == "ASYMMETRIC_SHARD_UNSUPPORTED" { + return nil, nil // if its the case of an asymmetric shard an error is expected in old API, replication_specs.*.id attribute will not be populated } - return diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + readErrorMsg := "error reading advanced cluster with 2023-02-01 API (%s): %s" + return nil, fmt.Errorf(readErrorMsg, clusterName, err) } + specs := clusterOldAPI.GetReplicationSpecs() + result := make(map[string]string, len(specs)) + for _, spec := range specs { + result[spec.GetZoneName()] = spec.GetId() + } + return result, nil +} - if err := d.Set("cluster_id", cluster.GetId()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "cluster_id", clusterName, err)) +// getZoneIDsFromNewAPI returns the zone id values of replication specs coming from new API. This is used to populate zone_id when old API is called in the read. +func getZoneIDsFromNewAPI(ctx context.Context, projectID, clusterName string, connV2 *admin.APIClient) (map[string]string, error) { + cluster, _, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + return nil, fmt.Errorf("error reading advanced cluster for fetching zone ids (%s): %s", clusterName, err) + } + specs := cluster.GetReplicationSpecs() + result := make(map[string]string, len(specs)) + for _, spec := range specs { + result[spec.GetZoneName()] = spec.GetZoneId() + } + return result, nil +} + +func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription20240805, isResourceSchema bool) diag.Diagnostics { + clusterName := *cluster.Name + + if isResourceSchema { + if err := d.Set("cluster_id", cluster.GetId()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "cluster_id", clusterName, err)) + } + + if err := d.Set("accept_data_risks_and_force_replica_set_reconfig", conversion.TimePtrToStringPtr(cluster.AcceptDataRisksAndForceReplicaSetReconfig)); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "accept_data_risks_and_force_replica_set_reconfig", clusterName, err)) + } } if err := d.Set("backup_enabled", cluster.GetBackupEnabled()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "backup_enabled", clusterName, err)) } - if err := d.Set("bi_connector_config", flattenBiConnectorConfig(cluster.GetBiConnector())); err != nil { + if err := d.Set("bi_connector_config", flattenBiConnectorConfig(cluster.BiConnector)); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "bi_connector_config", clusterName, err)) } @@ -508,7 +646,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "cluster_type", clusterName, err)) } - if err := d.Set("connection_strings", flattenConnectionStrings(cluster.GetConnectionStrings())); err != nil { + if err := d.Set("connection_strings", 
flattenConnectionStrings(*cluster.ConnectionStrings)); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "connection_strings", clusterName, err)) } @@ -516,10 +654,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "create_date", clusterName, err)) } - if err := d.Set("disk_size_gb", cluster.GetDiskSizeGB()); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err)) - } - if err := d.Set("encryption_at_rest_provider", cluster.GetEncryptionAtRestProvider()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "encryption_at_rest_provider", clusterName, err)) } @@ -528,7 +662,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "labels", clusterName, err)) } - if err := d.Set("tags", conversion.FlattenTagsOldSDK(cluster.GetTags())); err != nil { + if err := d.Set("tags", flattenTags(cluster.Tags)); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "tags", clusterName, err)) } @@ -552,15 +686,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "pit_enabled", clusterName, err)) } - replicationSpecs, err := FlattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV220240530) - if err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) - } - - if err := d.Set("replication_specs", replicationSpecs); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) - } - if err := d.Set("root_cert_type", cluster.GetRootCertType()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "state_name", clusterName, err)) } @@ -577,47 +702,43 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "version_release_system", clusterName, err)) } - if err := d.Set("accept_data_risks_and_force_replica_set_reconfig", conversion.TimePtrToStringPtr(cluster.AcceptDataRisksAndForceReplicaSetReconfig)); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "accept_data_risks_and_force_replica_set_reconfig", clusterName, err)) - } - if err := d.Set("global_cluster_self_managed_sharding", cluster.GetGlobalClusterSelfManagedSharding()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) } - processArgs, _, err := connV220240530.ClustersApi.GetClusterAdvancedConfiguration(ctx, projectID, clusterName).Execute() - if err != nil { - return diag.FromErr(fmt.Errorf(errorConfigRead, clusterName, err)) - } + return nil +} - if err := d.Set("advanced_configuration", flattenProcessArgs(processArgs)); err != nil { - return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "advanced_configuration", clusterName, err)) +// For both read and update operations if old sharding schema structure is used (at least one replication spec with numShards > 1) we continue to invoke the old API +func isUsingOldAPISchemaStructure(d *schema.ResourceData) bool { + tfList := d.Get("replication_specs").([]any) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) 
+ if !ok || tfMap == nil { + continue + } + numShards := tfMap["num_shards"].(int) + if numShards > 1 { + return true + } } - return nil + return false } func resourceUpdateOrUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { if upgradeRequest := getUpgradeRequest(d); upgradeRequest != nil { - upgradeCtx := context.WithValue(ctx, upgradeRequestCtxKey, upgradeRequest) - return resourceUpgrade(upgradeCtx, d, meta) + return resourceUpgrade(ctx, upgradeRequest, d, meta) } - return resourceUpdate(ctx, d, meta) } -func resourceUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 +func resourceUpgrade(ctx context.Context, upgradeRequest *admin.LegacyAtlasTenantClusterUpgradeRequest, d *schema.ResourceData, meta any) diag.Diagnostics { + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - upgradeRequest := ctx.Value(upgradeRequestCtxKey).(*admin20240530.LegacyAtlasTenantClusterUpgradeRequest) - - if upgradeRequest == nil { - return diag.FromErr(fmt.Errorf("upgrade called without %s in ctx", string(upgradeRequestCtxKey))) - } - - upgradeResponse, _, err := upgradeCluster(ctx, connV220240530, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate)) + upgradeResponse, _, err := upgradeCluster(ctx, connV2, upgradeRequest, projectID, clusterName, d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) @@ -634,12 +755,97 @@ func resourceUpgrade(ctx context.Context, d *schema.ResourceData, meta any) diag func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - cluster := new(admin20240530.AdvancedClusterDescription) - clusterChangeDetect := new(admin20240530.AdvancedClusterDescription) + if v, err := isUpdateAllowed(d); !v { + return diag.FromErr(fmt.Errorf("%s: %s", ErrorOperationNotPermitted, err)) + } + + timeout := d.Timeout(schema.TimeoutUpdate) + + if isUsingOldAPISchemaStructure(d) { + req, diags := updateRequestOldAPI(d, clusterName) + if diags != nil { + return diags + } + clusterChangeDetect := new(admin20240530.AdvancedClusterDescription) + if !reflect.DeepEqual(req, clusterChangeDetect) { + if _, _, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + } + if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + } + } + } else { + req, diags := updateRequest(ctx, d, projectID, clusterName, connV2) + if diags != nil { + return diags + } + clusterChangeDetect := new(admin.ClusterDescription20240805) + if !reflect.DeepEqual(req, clusterChangeDetect) { + if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + } + if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + } + } + } + + if 
d.HasChange("advanced_configuration") { + ac := d.Get("advanced_configuration") + if aclist, ok := ac.([]any); ok && len(aclist) > 0 { + params := expandProcessArgs(d, aclist[0].(map[string]any)) + if !reflect.DeepEqual(params, admin20240530.ClusterDescriptionProcessArgs{}) { + _, _, err := connV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, ¶ms).Execute() + if err != nil { + return diag.FromErr(fmt.Errorf(errorConfigUpdate, clusterName, err)) + } + } + } + } + + if d.Get("paused").(bool) { + clusterRequest := &admin.ClusterDescription20240805{ + Paused: conversion.Pointer(true), + } + if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, clusterRequest).Execute(); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + } + if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { + return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + } + } + + return resourceRead(ctx, d, meta) +} + +func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clusterName string, connV2 *admin.APIClient) (*admin.ClusterDescription20240805, diag.Diagnostics) { + cluster := new(admin.ClusterDescription20240805) + + if d.HasChange("replication_specs") || d.HasChange("disk_size_gb") { + var updatedDiskSizeGB *float64 + if d.HasChange("disk_size_gb") { + updatedDiskSizeGB = conversion.Pointer(d.Get("disk_size_gb").(float64)) + } + updatedReplicationSpecs := expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any), updatedDiskSizeGB) + + // case where sharding schema is transitioning from legacy to new structure (external_id is not present in the state so no ids are are currently present) + if noIDsPopulatedInReplicationSpecs(updatedReplicationSpecs) { + // ids need to be populated to avoid error in the update request + specsWithIDs, diags := populateIDValuesUsingNewAPI(ctx, projectID, clusterName, connV2.ClustersApi, updatedReplicationSpecs) + if diags != nil { + return nil, diags + } + updatedReplicationSpecs = specsWithIDs + } + SyncAutoScalingConfigs(updatedReplicationSpecs) + cluster.ReplicationSpecs = updatedReplicationSpecs + } if d.HasChange("backup_enabled") { cluster.BackupEnabled = conversion.Pointer(d.Get("backup_enabled").(bool)) @@ -653,10 +859,6 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. cluster.ClusterType = conversion.StringPtr(d.Get("cluster_type").(string)) } - if d.HasChange("disk_size_gb") { - cluster.DiskSizeGB = conversion.Pointer(d.Get("disk_size_gb").(float64)) - } - if d.HasChange("encryption_at_rest_provider") { cluster.EncryptionAtRestProvider = conversion.StringPtr(d.Get("encryption_at_rest_provider").(string)) } @@ -664,13 +866,13 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if d.HasChange("labels") { labels, err := expandLabelSliceFromSetSchema(d) if err != nil { - return err + return nil, err } cluster.Labels = &labels } if d.HasChange("tags") { - cluster.Tags = conversion.ExpandTagsFromSetSchemaOldSDK(d) + cluster.Tags = conversion.ExpandTagsFromSetSchema(d) } if d.HasChange("mongo_db_major_version") { @@ -681,8 +883,87 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
cluster.PitEnabled = conversion.Pointer(d.Get("pit_enabled").(bool)) } + if d.HasChange("root_cert_type") { + cluster.RootCertType = conversion.StringPtr(d.Get("root_cert_type").(string)) + } + + if d.HasChange("termination_protection_enabled") { + cluster.TerminationProtectionEnabled = conversion.Pointer(d.Get("termination_protection_enabled").(bool)) + } + + if d.HasChange("version_release_system") { + cluster.VersionReleaseSystem = conversion.StringPtr(d.Get("version_release_system").(string)) + } + + if d.HasChange("global_cluster_self_managed_sharding") { + cluster.GlobalClusterSelfManagedSharding = conversion.Pointer(d.Get("global_cluster_self_managed_sharding").(bool)) + } + + if d.HasChange("accept_data_risks_and_force_replica_set_reconfig") { + if strTime := d.Get("accept_data_risks_and_force_replica_set_reconfig").(string); strTime != "" { + t, ok := conversion.StringToTime(strTime) + if !ok { + return nil, diag.FromErr(fmt.Errorf(errorUpdate, clusterName, "accept_data_risks_and_force_replica_set_reconfig time format is incorrect")) + } + cluster.AcceptDataRisksAndForceReplicaSetReconfig = &t + } + } + + if d.HasChange("paused") && !d.Get("paused").(bool) { + cluster.Paused = conversion.Pointer(d.Get("paused").(bool)) + } + return cluster, nil +} + +func updateRequestOldAPI(d *schema.ResourceData, clusterName string) (*admin20240530.AdvancedClusterDescription, diag.Diagnostics) { + cluster := new(admin20240530.AdvancedClusterDescription) + if d.HasChange("replication_specs") { - cluster.ReplicationSpecs = expandAdvancedReplicationSpecs(d.Get("replication_specs").([]any)) + cluster.ReplicationSpecs = expandAdvancedReplicationSpecsOldSDK(d.Get("replication_specs").([]any)) + } + + if d.HasChange("disk_size_gb") { + cluster.DiskSizeGB = conversion.Pointer(d.Get("disk_size_gb").(float64)) + } + + if changedValue := obtainChangeForDiskSizeGBInFirstRegion(d); changedValue != nil { + cluster.DiskSizeGB = changedValue + } + + if d.HasChange("backup_enabled") { + cluster.BackupEnabled = conversion.Pointer(d.Get("backup_enabled").(bool)) + } + + if d.HasChange("bi_connector_config") { + cluster.BiConnector = convertBiConnectToOldSDK(expandBiConnectorConfig(d)) + } + + if d.HasChange("cluster_type") { + cluster.ClusterType = conversion.StringPtr(d.Get("cluster_type").(string)) + } + + if d.HasChange("encryption_at_rest_provider") { + cluster.EncryptionAtRestProvider = conversion.StringPtr(d.Get("encryption_at_rest_provider").(string)) + } + + if d.HasChange("labels") { + labels, err := convertLabelSliceToOldSDK(expandLabelSliceFromSetSchema(d)) + if err != nil { + return nil, err + } + cluster.Labels = &labels + } + + if d.HasChange("tags") { + cluster.Tags = convertTagsPtrToOldSDK(conversion.ExpandTagsFromSetSchema(d)) + } + + if d.HasChange("mongo_db_major_version") { + cluster.MongoDBMajorVersion = conversion.StringPtr(FormatMongoDBMajorVersion(d.Get("mongo_db_major_version"))) + } + + if d.HasChange("pit_enabled") { + cluster.PitEnabled = conversion.Pointer(d.Get("pit_enabled").(bool)) } if d.HasChange("root_cert_type") { @@ -705,7 +986,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
if strTime := d.Get("accept_data_risks_and_force_replica_set_reconfig").(string); strTime != "" { t, ok := conversion.StringToTime(strTime) if !ok { - return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, "accept_data_risks_and_force_replica_set_reconfig time format is incorrect")) + return nil, diag.FromErr(fmt.Errorf(errorUpdate, clusterName, "accept_data_risks_and_force_replica_set_reconfig time format is incorrect")) } cluster.AcceptDataRisksAndForceReplicaSetReconfig = &t } @@ -714,59 +995,63 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. if d.HasChange("paused") && !d.Get("paused").(bool) { cluster.Paused = conversion.Pointer(d.Get("paused").(bool)) } + return cluster, nil +} - timeout := d.Timeout(schema.TimeoutUpdate) +func isUpdateAllowed(d *schema.ResourceData) (bool, error) { + cs, us := d.GetChange("replication_specs") + currentSpecs, updatedSpecs := cs.([]any), us.([]any) - if d.HasChange("advanced_configuration") { - ac := d.Get("advanced_configuration") - if aclist, ok := ac.([]any); ok && len(aclist) > 0 { - params := expandProcessArgs(d, aclist[0].(map[string]any)) - if !reflect.DeepEqual(params, admin20240530.ClusterDescriptionProcessArgs{}) { - _, _, err := connV220240530.ClustersApi.UpdateClusterAdvancedConfiguration(ctx, projectID, clusterName, &params).Execute() - if err != nil { - return diag.FromErr(fmt.Errorf(errorConfigUpdate, clusterName, err)) - } + isNewSchemaCompatible := checkNewSchemaCompatibility(currentSpecs) + + for _, specRaw := range updatedSpecs { + if specMap, ok := specRaw.(map[string]any); ok && specMap != nil { + numShards, _ := specMap["num_shards"].(int) + if numShards > 1 && isNewSchemaCompatible { + return false, fmt.Errorf("cannot increase num_shards to > 1 under the current configuration. New shards can be defined by adding new replication spec objects; %s", DeprecationOldSchemaAction) } } } + return true, nil +} - // Has changes - if !reflect.DeepEqual(cluster, clusterChangeDetect) { - err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { - _, resp, err := updateAdvancedCluster(ctx, connV220240530, cluster, projectID, clusterName, timeout) - if err != nil { - if resp == nil || resp.StatusCode == 400 { - return retry.NonRetryableError(fmt.Errorf(errorUpdate, clusterName, err)) - } - return retry.RetryableError(fmt.Errorf(errorUpdate, clusterName, err)) +func checkNewSchemaCompatibility(specs []any) bool { + for _, specRaw := range specs { + if specMap, ok := specRaw.(map[string]any); ok && specMap != nil { + numShards, _ := specMap["num_shards"].(int) + if numShards >= 2 { + return false } - return nil - }) - if err != nil { - return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) - } } + return true +} - if d.Get("paused").(bool) { - clusterRequest := &admin20240530.AdvancedClusterDescription{ - Paused: conversion.Pointer(true), - } - _, _, err := updateAdvancedCluster(ctx, connV220240530, clusterRequest, projectID, clusterName, timeout) - if err != nil { - return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) - } +// When the legacy schema structure is used, we invoke the old API for updates. This API sends diskSizeGB at the root level. +// This function is used to detect if changes are made in the inner spec levels. It assumes that all disk_size_gb values at the inner spec level have the same value, so it looks into the first region config. 
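+// For example, if only replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb changed (say from 60 to 80), this returns a pointer to 80 so the legacy update request sends diskSizeGB at the root level.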
+func obtainChangeForDiskSizeGBInFirstRegion(d *schema.ResourceData) *float64 { + electableLocation := "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb" + readOnlyLocation := "replication_specs.0.region_configs.0.read_only_specs.0.disk_size_gb" + analyticsLocation := "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb" + if d.HasChange(electableLocation) { + return admin.PtrFloat64(d.Get(electableLocation).(float64)) } - - return resourceRead(ctx, d, meta) + if d.HasChange(readOnlyLocation) { + return admin.PtrFloat64(d.Get(readOnlyLocation).(float64)) + } + if d.HasChange(analyticsLocation) { + return admin.PtrFloat64(d.Get(analyticsLocation).(float64)) + } + return nil } func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - params := &admin20240530.DeleteClusterApiParams{ + params := &admin.DeleteClusterApiParams{ GroupId: projectID, ClusterName: clusterName, } @@ -774,14 +1059,14 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. params.RetainBackups = conversion.Pointer(v.(bool)) } - _, err := connV220240530.ClustersApi.DeleteClusterWithParams(ctx, params).Execute() + _, err := connV2.ClustersApi.DeleteClusterWithParams(ctx, params).Execute() if err != nil { return diag.FromErr(fmt.Errorf(errorDelete, clusterName, err)) } log.Println("[INFO] Waiting for MongoDB ClusterAdvanced to be destroyed") - stateConf := DeleteStateChangeConfig(ctx, connV220240530, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) + stateConf := DeleteStateChangeConfig(ctx, connV2, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) // Wait, catching any errors _, err = stateConf.WaitForStateContext(ctx) if err != nil { @@ -791,11 +1076,11 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. return nil } -func DeleteStateChangeConfig(ctx context.Context, connV220240530 *admin20240530.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { +func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf { return retry.StateChangeConf{ Pending: []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"}, Target: []string{"DELETED"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, @@ -803,14 +1088,14 @@ func DeleteStateChangeConfig(ctx context.Context, connV220240530 *admin20240530. 
} func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID, name, err := splitSClusterAdvancedImportID(d.Id()) if err != nil { return nil, err } - cluster, _, err := connV220240530.ClustersApi.GetCluster(ctx, *projectID, *name).Execute() + cluster, _, err := connV2.ClustersApi.GetCluster(ctx, *projectID, *name).Execute() if err != nil { return nil, fmt.Errorf("couldn't import cluster %s in project %s, error: %s", *name, *projectID, err) } @@ -832,10 +1117,10 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func upgradeCluster(ctx context.Context, connV220240530 *admin20240530.APIClient, request *admin20240530.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin20240530.LegacyAtlasCluster, *http.Response, error) { +func upgradeCluster(ctx context.Context, connV2 *admin.APIClient, request *admin.LegacyAtlasTenantClusterUpgradeRequest, projectID, name string, timeout time.Duration) (*admin.LegacyAtlasCluster, *http.Response, error) { request.Name = name - cluster, resp, err := connV220240530.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute() + cluster, resp, err := connV2.ClustersApi.UpgradeSharedCluster(ctx, projectID, request).Execute() if err != nil { return nil, nil, err } @@ -843,7 +1128,7 @@ func upgradeCluster(ctx context.Context, connV220240530 *admin20240530.APIClient stateConf := &retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, Target: []string{"IDLE"}, - Refresh: UpgradeRefreshFunc(ctx, name, projectID, connV220240530.ClustersApi), + Refresh: UpgradeRefreshFunc(ctx, name, projectID, connV2.ClustersApi), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, @@ -873,9 +1158,9 @@ func splitSClusterAdvancedImportID(id string) (projectID, clusterName *string, e return } -func resourceRefreshFunc(ctx context.Context, name, projectID string, connV220240530 *admin20240530.APIClient) retry.StateRefreshFunc { +func resourceRefreshFunc(ctx context.Context, name, projectID string, connV2 *admin.APIClient) retry.StateRefreshFunc { return func() (any, string, error) { - cluster, resp, err := connV220240530.ClustersApi.GetCluster(ctx, projectID, name).Execute() + cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, name).Execute() if err != nil && strings.Contains(err.Error(), "reset by peer") { return nil, "REPEATING", nil } @@ -908,14 +1193,14 @@ func replicationSpecsHashSet(v any) int { return schema.HashString(buf.String()) } -func getUpgradeRequest(d *schema.ResourceData) *admin20240530.LegacyAtlasTenantClusterUpgradeRequest { +func getUpgradeRequest(d *schema.ResourceData) *admin.LegacyAtlasTenantClusterUpgradeRequest { if !d.HasChange("replication_specs") { return nil } cs, us := d.GetChange("replication_specs") - currentSpecs := expandAdvancedReplicationSpecs(cs.([]any)) - updatedSpecs := expandAdvancedReplicationSpecs(us.([]any)) + currentSpecs := expandAdvancedReplicationSpecsOldSDK(cs.([]any)) + updatedSpecs := expandAdvancedReplicationSpecsOldSDK(us.([]any)) if currentSpecs == nil || updatedSpecs == nil || len(*currentSpecs) != 1 || len(*updatedSpecs) != 1 || len((*currentSpecs)[0].GetRegionConfigs()) != 1 || len((*updatedSpecs)[0].GetRegionConfigs()) != 1 { return nil @@ -929,8 +1214,8 @@ func getUpgradeRequest(d 
*schema.ResourceData) *admin20240530.LegacyAtlasTenantC return nil } - return &admin20240530.LegacyAtlasTenantClusterUpgradeRequest{ - ProviderSettings: &admin20240530.ClusterProviderSettings{ + return &admin.LegacyAtlasTenantClusterUpgradeRequest{ + ProviderSettings: &admin.ClusterProviderSettings{ ProviderName: updatedRegion.GetProviderName(), InstanceSizeName: updatedRegion.ElectableSpecs.InstanceSize, RegionName: updatedRegion.RegionName, @@ -938,31 +1223,16 @@ func getUpgradeRequest(d *schema.ResourceData) *admin20240530.LegacyAtlasTenantC } } -func updateAdvancedCluster( - ctx context.Context, - connV220240530 *admin20240530.APIClient, - request *admin20240530.AdvancedClusterDescription, - projectID, name string, - timeout time.Duration, -) (*admin20240530.AdvancedClusterDescription, *http.Response, error) { - cluster, resp, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, name, request).Execute() - if err != nil { - return nil, nil, err - } - +func waitForUpdateToFinish(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, Target: []string{"IDLE"}, - Refresh: resourceRefreshFunc(ctx, name, projectID, connV220240530), + Refresh: resourceRefreshFunc(ctx, name, projectID, connV2), Timeout: timeout, MinTimeout: 30 * time.Second, Delay: 1 * time.Minute, } - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { - return nil, nil, err - } - - return cluster, resp, nil + _, err := stateConf.WaitForStateContext(ctx) + return err } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go b/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go index 747ba13601..69bd1d9db3 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go @@ -3,6 +3,7 @@ package advancedcluster_test import ( "fmt" "os" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -11,11 +12,38 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" ) -func TestMigAdvancedCluster_singleAWSProvider(t *testing.T) { +// last version that did not support new sharding schema or attributes +const versionBeforeISSRelease = "1.17.6" + +func TestMigAdvancedCluster_replicaSetAWSProvider(t *testing.T) { + testCase := replicaSetAWSProviderTestCase(t) + mig.CreateAndRunTest(t, &testCase) +} + +func TestMigAdvancedCluster_replicaSetMultiCloud(t *testing.T) { + testCase := replicaSetMultiCloudTestCase(t) + mig.CreateAndRunTest(t, &testCase) +} + +func TestMigAdvancedCluster_singleShardedMultiCloud(t *testing.T) { + testCase := singleShardedMultiCloudTestCase(t) + mig.CreateAndRunTest(t, &testCase) +} + +func TestMigAdvancedCluster_symmetricGeoShardedOldSchema(t *testing.T) { + testCase := symmetricGeoShardedOldSchemaTestCase(t) + mig.CreateAndRunTest(t, &testCase) +} + +func TestMigAdvancedCluster_asymmetricShardedNewSchema(t *testing.T) { + testCase := asymmetricShardedNewSchemaTestCase(t) + mig.CreateAndRunTest(t, &testCase) +} + +func TestMigAdvancedCluster_replicaSetAWSProviderUpdate(t *testing.T) { var ( projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() - config = configSingleProvider(projectID, clusterName) ) resource.ParallelTest(t, resource.TestCase{ @@ -23,21 +51,24 @@ func TestMigAdvancedCluster_singleAWSProvider(t 
*testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - ExternalProviders: mig.ExternalProviders(), - Config: config, - Check: checkSingleProvider(projectID, clusterName), + ExternalProviders: acc.ExternalProviders(versionBeforeISSRelease), + Config: configReplicaSetAWSProvider(projectID, clusterName, 60, 3), + Check: checkReplicaSetAWSProvider(projectID, clusterName, 60, 3, false, false), + }, + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Config: configReplicaSetAWSProvider(projectID, clusterName, 60, 5), + Check: checkReplicaSetAWSProvider(projectID, clusterName, 60, 5, true, true), }, - mig.TestStepCheckEmptyPlan(config), }, }) } -func TestMigAdvancedCluster_multiCloud(t *testing.T) { +func TestMigAdvancedCluster_geoShardedOldSchemaUpdate(t *testing.T) { var ( orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region clusterName = acc.RandomClusterName() - config = configMultiCloud(orgID, projectName, clusterName) ) resource.ParallelTest(t, resource.TestCase{ @@ -45,11 +76,75 @@ func TestMigAdvancedCluster_multiCloud(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - ExternalProviders: mig.ExternalProviders(), - Config: config, - Check: checkMultiCloud(clusterName, 3), + ExternalProviders: acc.ExternalProviders(versionBeforeISSRelease), + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 2, 2, false), + Check: checkGeoShardedOldSchema(clusterName, 2, 2, false, false), + }, + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 2, 1, false), + Check: checkGeoShardedOldSchema(clusterName, 2, 1, true, false), + }, + }, + }) +} + +func TestMigAdvancedCluster_shardedMigrationFromOldToNewSchema(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { mig.PreCheckBasic(t) }, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + ExternalProviders: acc.ExternalProviders(versionBeforeISSRelease), + Config: configShardedTransitionOldToNewSchema(orgID, projectName, clusterName, false), + Check: checkShardedTransitionOldToNewSchema(false), + }, + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Config: configShardedTransitionOldToNewSchema(orgID, projectName, clusterName, true), + ExpectError: regexp.MustCompile("SERVICE_UNAVAILABLE"), + }, + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Config: configShardedTransitionOldToNewSchema(orgID, projectName, clusterName, true), + Check: checkShardedTransitionOldToNewSchema(true), + }, + }, + }) +} + +func TestMigAdvancedCluster_geoShardedMigrationFromOldToNewSchema(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { mig.PreCheckBasic(t) }, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + ExternalProviders: acc.ExternalProviders(versionBeforeISSRelease), + Config: configGeoShardedTransitionOldToNewSchema(orgID, projectName, clusterName, false), + Check: checkGeoShardedTransitionOldToNewSchema(false), + }, + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + 
Config: configShardedTransitionOldToNewSchema(orgID, projectName, clusterName, true), + ExpectError: regexp.MustCompile("SERVICE_UNAVAILABLE"), + }, + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Config: configGeoShardedTransitionOldToNewSchema(orgID, projectName, clusterName, true), + Check: checkGeoShardedTransitionOldToNewSchema(true), }, - mig.TestStepCheckEmptyPlan(config), }, }) } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_state_upgrader_test.go b/internal/service/advancedcluster/resource_advanced_cluster_state_upgrader_test.go index f860d49b4d..eafce884cb 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_state_upgrader_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_state_upgrader_test.go @@ -109,7 +109,7 @@ func TestMigAdvancedCluster_v0StateUpgrade_ReplicationSpecs(t *testing.T) { v0Config := terraform.NewResourceConfigRaw(v0State) diags := advancedcluster.ResourceV0().Validate(v0Config) - if len(diags) > 0 { + if diags.HasError() { fmt.Println(diags) t.Error("test precondition failed - invalid mongodb cluster v0 config") @@ -121,7 +121,7 @@ func TestMigAdvancedCluster_v0StateUpgrade_ReplicationSpecs(t *testing.T) { v1Config := terraform.NewResourceConfigRaw(v1State) diags = advancedcluster.Resource().Validate(v1Config) - if len(diags) > 0 { + if diags.HasError() { fmt.Println(diags) t.Error("migrated advanced cluster replication_specs invalid") diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index aa4f3efcda..beba9619ae 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -1,18 +1,21 @@ package advancedcluster_test import ( - "context" "fmt" "os" "regexp" "strconv" "testing" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) const ( @@ -50,20 +53,28 @@ func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) { }) } -func TestAccClusterAdvancedCluster_singleProvider(t *testing.T) { +func TestAccClusterAdvancedCluster_replicaSetAWSProvider(t *testing.T) { + resource.ParallelTest(t, replicaSetAWSProviderTestCase(t)) +} +func replicaSetAWSProviderTestCase(t *testing.T) resource.TestCase { + t.Helper() var ( projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() ) - resource.ParallelTest(t, resource.TestCase{ + return resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configSingleProvider(projectID, clusterName), - Check: checkSingleProvider(projectID, clusterName), + Config: configReplicaSetAWSProvider(projectID, clusterName, 60, 3), + Check: checkReplicaSetAWSProvider(projectID, clusterName, 60, 3, true, true), + }, + { + Config: configReplicaSetAWSProvider(projectID, clusterName, 50, 5), + Check: checkReplicaSetAWSProvider(projectID, 
clusterName, 50, 5, true, true), }, { ResourceName: resourceName, @@ -73,10 +84,14 @@ func TestAccClusterAdvancedCluster_singleProvider(t *testing.T) { ImportStateVerifyIgnore: []string{"replication_specs", "retain_backups_enabled"}, }, }, - }) + } } -func TestAccClusterAdvancedCluster_multicloud(t *testing.T) { +func TestAccClusterAdvancedCluster_replicaSetMultiCloud(t *testing.T) { + resource.ParallelTest(t, replicaSetMultiCloudTestCase(t)) +} +func replicaSetMultiCloudTestCase(t *testing.T) resource.TestCase { + t.Helper() var ( orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region @@ -84,18 +99,18 @@ func TestAccClusterAdvancedCluster_multicloud(t *testing.T) { clusterNameUpdated = acc.RandomClusterName() ) - resource.ParallelTest(t, resource.TestCase{ + return resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configMultiCloud(orgID, projectName, clusterName), - Check: checkMultiCloud(clusterName, 3), + Config: configReplicaSetMultiCloud(orgID, projectName, clusterName), + Check: checkReplicaSetMultiCloud(clusterName, 3), }, { - Config: configMultiCloud(orgID, projectName, clusterNameUpdated), - Check: checkMultiCloud(clusterNameUpdated, 3), + Config: configReplicaSetMultiCloud(orgID, projectName, clusterNameUpdated), + Check: checkReplicaSetMultiCloud(clusterNameUpdated, 3), }, { ResourceName: resourceName, @@ -105,10 +120,15 @@ func TestAccClusterAdvancedCluster_multicloud(t *testing.T) { ImportStateVerifyIgnore: []string{"replication_specs", "retain_backups_enabled"}, }, }, - }) + } +} + +func TestAccClusterAdvancedCluster_singleShardedMultiCloud(t *testing.T) { + resource.ParallelTest(t, singleShardedMultiCloudTestCase(t)) } -func TestAccClusterAdvancedCluster_multicloudSharded(t *testing.T) { +func singleShardedMultiCloudTestCase(t *testing.T) resource.TestCase { + t.Helper() var ( orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region @@ -116,18 +136,18 @@ func TestAccClusterAdvancedCluster_multicloudSharded(t *testing.T) { clusterNameUpdated = acc.RandomClusterName() ) - resource.ParallelTest(t, resource.TestCase{ + return resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configMultiCloudSharded(orgID, projectName, clusterName), - Check: checkMultiCloudSharded(clusterName), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10"), + Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true), }, { - Config: configMultiCloudSharded(orgID, projectName, clusterNameUpdated), - Check: checkMultiCloudSharded(clusterNameUpdated), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10"), + Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true), }, { ResourceName: resourceName, @@ -137,7 +157,7 @@ func TestAccClusterAdvancedCluster_multicloudSharded(t *testing.T) { ImportStateVerifyIgnore: []string{"replication_specs"}, }, }, - }) + } } func TestAccClusterAdvancedCluster_unpausedToPaused(t *testing.T) { @@ -312,13 +332,13 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing. 
projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - autoScaling = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScaling = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } - autoScalingUpdated = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScalingUpdated = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } ) @@ -354,13 +374,13 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() clusterNameUpdated = acc.RandomClusterName() - autoScaling = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScaling = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(false), MaxInstanceSize: conversion.StringPtr("")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } - autoScalingUpdated = &admin20240530.AdvancedAutoScalingSettings{ - Compute: &admin20240530.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, - DiskGB: &admin20240530.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, + autoScalingUpdated = &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{Enabled: conversion.Pointer(true), MaxInstanceSize: conversion.StringPtr("M20")}, + DiskGB: &admin.DiskGBAutoScaling{Enabled: conversion.Pointer(true)}, } ) @@ -391,7 +411,7 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t }) } -func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *testing.T) { +func TestAccClusterAdvancedClusterConfig_singleShardedTransitionToOldSchemaExpectsError(t *testing.T) { var ( orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region @@ -404,12 +424,12 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *tes CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false), - Check: checkMultiZoneWithShards(clusterName, 1, 1), + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 1, 1, false), + Check: checkGeoShardedOldSchema(clusterName, 1, 1, true, true), }, { - Config: configMultiZoneWithShards(orgID, projectName, clusterName, 2, 1, false), - Check: checkMultiZoneWithShards(clusterName, 2, 1), + Config: 
configGeoShardedOldSchema(orgID, projectName, clusterName, 1, 2, false), + ExpectError: regexp.MustCompile(advancedcluster.ErrorOperationNotPermitted), }, }, }) @@ -456,7 +476,7 @@ func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, true), + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 1, 1, true), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "true"), @@ -464,7 +484,7 @@ func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) { ), }, { - Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false), + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 1, 1, false), ExpectError: regexp.MustCompile("CANNOT_MODIFY_GLOBAL_CLUSTER_MANAGEMENT_SETTING"), }, }, @@ -490,6 +510,212 @@ func TestAccClusterAdvancedClusterConfig_selfManagedShardingIncorrectType(t *tes }) } +func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchema(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10"), + Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false), + }, + { + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20"), + Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false), + }, + }, + }) +} + +func TestAccClusterAdvancedClusterConfig_symmetricGeoShardedOldSchema(t *testing.T) { + resource.ParallelTest(t, symmetricGeoShardedOldSchemaTestCase(t)) +} + +func symmetricGeoShardedOldSchemaTestCase(t *testing.T) resource.TestCase { + t.Helper() + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region + clusterName = acc.RandomClusterName() + ) + + return resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 2, 2, false), + Check: checkGeoShardedOldSchema(clusterName, 2, 2, true, false), + }, + { + Config: configGeoShardedOldSchema(orgID, projectName, clusterName, 3, 3, false), + Check: checkGeoShardedOldSchema(clusterName, 3, 3, true, false), + }, + }, + } +} + +func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtElectableLevel(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, 
clusterName, 50), + Check: checkShardedOldSchemaDiskSizeGBElectableLevel(50), + }, + { + Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName, 55), + Check: checkShardedOldSchemaDiskSizeGBElectableLevel(55), + }, + }, + }) +} + +func TestAccClusterAdvancedClusterConfig_symmetricShardedNewSchemaToAsymmetricAddingRemovingShard(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configShardedNewSchema(orgID, projectName, clusterName, 50, "M10", "M10", nil, nil, false), + Check: checkShardedNewSchema(50, "M10", "M10", nil, nil, false, false), + }, + { + Config: configShardedNewSchema(orgID, projectName, clusterName, 55, "M10", "M20", nil, nil, true), // add middle replication spec and transition to asymmetric + Check: checkShardedNewSchema(55, "M10", "M20", nil, nil, true, true), + }, + { + Config: configShardedNewSchema(orgID, projectName, clusterName, 55, "M10", "M20", nil, nil, false), // removes middle replication spec + Check: checkShardedNewSchema(55, "M10", "M20", nil, nil, true, false), + }, + }, + }) +} + +func TestAccClusterAdvancedClusterConfig_asymmetricShardedNewSchema(t *testing.T) { + resource.ParallelTest(t, asymmetricShardedNewSchemaTestCase(t)) +} + +func asymmetricShardedNewSchemaTestCase(t *testing.T) resource.TestCase { + t.Helper() + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + return resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configShardedNewSchema(orgID, projectName, clusterName, 50, "M30", "M40", admin.PtrInt(2000), admin.PtrInt(2500), false), + Check: checkShardedNewSchema(50, "M30", "M40", admin.PtrInt(2000), admin.PtrInt(2500), true, false), + }, + }, + } +} + +func TestAccClusterAdvancedClusterConfig_asymmetricGeoShardedNewSchemaAddingRemovingShard(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configGeoShardedNewSchema(orgID, projectName, clusterName, false), + Check: checkGeoShardedNewSchema(false), + }, + { + Config: configGeoShardedNewSchema(orgID, projectName, clusterName, true), + Check: checkGeoShardedNewSchema(true), + }, + { + Config: configGeoShardedNewSchema(orgID, projectName, clusterName, false), + Check: checkGeoShardedNewSchema(false), + }, + }, + }) +} + +func TestAccClusterAdvancedClusterConfig_shardedTransitionFromOldToNewSchema(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: 
[]resource.TestStep{ + { + Config: configShardedTransitionOldToNewSchema(orgID, projectName, clusterName, false), + Check: checkShardedTransitionOldToNewSchema(false), + }, + { + Config: configShardedTransitionOldToNewSchema(orgID, projectName, clusterName, true), + Check: checkShardedTransitionOldToNewSchema(true), + }, + }, + }) +} + +func TestAccClusterAdvancedClusterConfig_geoShardedTransitionFromOldToNewSchema(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configGeoShardedTransitionOldToNewSchema(orgID, projectName, clusterName, false), + Check: checkGeoShardedTransitionOldToNewSchema(false), + }, + { + Config: configGeoShardedTransitionOldToNewSchema(orgID, projectName, clusterName, true), + Check: checkGeoShardedTransitionOldToNewSchema(true), + }, + }, + }) +} + func checkAggr(attrsSet []string, attrsMap map[string]string, extra ...resource.TestCheckFunc) resource.TestCheckFunc { checks := []resource.TestCheckFunc{checkExists(resourceName)} checks = acc.AddAttrChecks(resourceName, checks, attrsMap) @@ -510,10 +736,11 @@ func checkExists(resourceName string) resource.TestCheckFunc { return fmt.Errorf("no ID is set") } ids := conversion.DecodeStateID(rs.Primary.ID) - if _, _, err := acc.ConnV2().ClustersApi.GetCluster(context.Background(), ids["project_id"], ids["cluster_name"]).Execute(); err == nil { + err := acc.CheckClusterExistsHandlingRetry(ids["project_id"], ids["cluster_name"]) + if err == nil { return nil } - return fmt.Errorf("cluster(%s:%s) does not exist", rs.Primary.Attributes["project_id"], rs.Primary.ID) + return fmt.Errorf("cluster(%s:%s) does not exist: %w", rs.Primary.Attributes["project_id"], rs.Primary.ID, err) } } @@ -552,7 +779,7 @@ func checkTenant(projectID, name string) resource.TestCheckFunc { pluralChecks := acc.AddAttrSetChecks(dataSourcePluralName, nil, []string{"results.#", "results.0.replication_specs.#", "results.0.name", "results.0.termination_protection_enabled", "results.0.global_cluster_self_managed_sharding"}...) return checkAggr( - []string{"replication_specs.#", "replication_specs.0.region_configs.#"}, + []string{"replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"}, map[string]string{ "project_id": projectID, "name": name, @@ -635,19 +862,20 @@ func checkTags(name string, tags ...map[string]string) resource.TestCheckFunc { tagChecks...) 
} -func configSingleProvider(projectID, name string) string { +func configReplicaSetAWSProvider(projectID, name string, diskSizeGB, nodeCountElectable int) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q name = %[2]q cluster_type = "REPLICASET" retain_backups_enabled = "true" + disk_size_gb = %[3]d replication_specs { region_configs { electable_specs { instance_size = "M10" - node_count = 3 + node_count = %[4]d } analytics_specs { instance_size = "M10" @@ -664,20 +892,37 @@ func configSingleProvider(projectID, name string) string { project_id = mongodbatlas_advanced_cluster.test.project_id name = mongodbatlas_advanced_cluster.test.name } - `, projectID, name) + `, projectID, name, diskSizeGB, nodeCountElectable) } -func checkSingleProvider(projectID, name string) resource.TestCheckFunc { - return checkAggr( - []string{"replication_specs.#", "replication_specs.0.region_configs.#"}, - map[string]string{ - "project_id": projectID, - "name": name}, +func checkReplicaSetAWSProvider(projectID, name string, diskSizeGB, nodeCountElectable int, checkDiskSizeGBInnerLevel, checkExternalID bool) resource.TestCheckFunc { + additionalChecks := []resource.TestCheckFunc{ resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), - resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), - resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0))) + } + if checkDiskSizeGBInnerLevel { + additionalChecks = append(additionalChecks, + checkAggr([]string{}, map[string]string{ + "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + }), + ) + } + + if checkExternalID { + additionalChecks = append(additionalChecks, resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id")) + } + + return checkAggr( + []string{"replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"}, + map[string]string{ + "project_id": projectID, + "disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + "replication_specs.0.region_configs.0.electable_specs.0.node_count": fmt.Sprintf("%d", nodeCountElectable), + "name": name}, + additionalChecks..., + ) } func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string { @@ -708,7 +953,7 @@ func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) `, projectID, name) } -func configMultiCloud(orgID, projectName, name string) string { +func configReplicaSetMultiCloud(orgID, projectName, name string) string { return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -767,11 +1012,8 @@ func configMultiCloud(orgID, projectName, name string) string { `, orgID, projectName, name) } -func checkMultiCloud(name string, regionConfigs int) resource.TestCheckFunc { - return checkAggr( - []string{"project_id", "replication_specs.#"}, - map[string]string{ - "name": name}, +func checkReplicaSetMultiCloud(name string, regionConfigs 
int) resource.TestCheckFunc { + additionalChecks := []resource.TestCheckFunc{ resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "false"), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.#", acc.JSONEquals(strconv.Itoa(regionConfigs))), resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.#", acc.JSONEquals(strconv.Itoa(regionConfigs))), @@ -779,10 +1021,17 @@ func checkMultiCloud(name string, regionConfigs int) resource.TestCheckFunc { resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.#"), resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.replication_specs.#"), resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name"), + resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id"), + } + return checkAggr( + []string{"project_id", "replication_specs.#", "replication_specs.0.id"}, + map[string]string{ + "name": name}, + additionalChecks..., ) } -func configMultiCloudSharded(orgID, projectName, name string) string { +func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string) string { return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -795,14 +1044,14 @@ func configMultiCloudSharded(orgID, projectName, name string) string { cluster_type = "SHARDED" replication_specs { - num_shards = 1 + num_shards = %[4]d region_configs { electable_specs { - instance_size = "M30" + instance_size = "M10" node_count = 3 } analytics_specs { - instance_size = "M30" + instance_size = %[5]q node_count = 1 } provider_name = "AWS" @@ -811,7 +1060,7 @@ func configMultiCloudSharded(orgID, projectName, name string) string { } region_configs { electable_specs { - instance_size = "M30" + instance_size = "M10" node_count = 2 } provider_name = "AZURE" @@ -825,20 +1074,33 @@ func configMultiCloudSharded(orgID, projectName, name string) string { project_id = mongodbatlas_advanced_cluster.test.project_id name = mongodbatlas_advanced_cluster.test.name } - `, orgID, projectName, name) + `, orgID, projectName, name, numShards, analyticsSize) } -func checkMultiCloudSharded(name string) resource.TestCheckFunc { - return checkAggr( - []string{"project_id", "replication_specs.#", "replication_specs.0.region_configs.#"}, - map[string]string{ - "name": name}, +func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool) resource.TestCheckFunc { + additionalChecks := []resource.TestCheckFunc{ resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.1.electable_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), - resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.1.electable_specs.0.disk_iops", acc.IntGreatThan(0))) + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.1.electable_specs.0.disk_iops", 
acc.IntGreatThan(0)), + } + + if verifyExternalID { + additionalChecks = append( + additionalChecks, + resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id")) + } + + return checkAggr( + []string{"project_id", "replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"}, + map[string]string{ + "name": name, + "replication_specs.0.num_shards": strconv.Itoa(numShards), + "replication_specs.0.region_configs.0.analytics_specs.0.instance_size": analyticsSize, + }, + additionalChecks...) } func configSingleProviderPaused(projectID, clusterName string, paused bool, instanceSize string) string { @@ -1013,7 +1275,7 @@ func checkAdvancedDefaultWrite(name, writeConcern, tls string) resource.TestChec resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.name")) } -func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin20240530.AdvancedAutoScalingSettings) string { +func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q @@ -1044,7 +1306,7 @@ func configReplicationSpecsAutoScaling(projectID, clusterName string, p *admin20 `, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize()) } -func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p *admin20240530.AdvancedAutoScalingSettings) string { +func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p *admin.AdvancedAutoScalingSettings) string { return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { project_id = %[1]q @@ -1075,7 +1337,7 @@ func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p `, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize()) } -func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string { +func configGeoShardedOldSchema(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string { return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -1089,6 +1351,7 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo mongo_db_major_version = "7.0" cluster_type = "GEOSHARDED" global_cluster_self_managed_sharding = %[6]t + disk_size_gb = 60 replication_specs { zone_name = "zone n1" @@ -1136,12 +1399,521 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo `, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone, selfManagedSharding) } -func checkMultiZoneWithShards(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc { +func checkGeoShardedOldSchema(name string, numShardsFirstZone, numShardsSecondZone int, isLatestProviderVersion, verifyExternalID bool) resource.TestCheckFunc { + additionalChecks := []resource.TestCheckFunc{} + + if verifyExternalID { + additionalChecks = append(additionalChecks, resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id")) + } + + if isLatestProviderVersion { // checks that will not apply if doing migration test with older version + additionalChecks = append(additionalChecks, checkAggr( + []string{"replication_specs.0.zone_id", "replication_specs.0.zone_id"}, + map[string]string{ + 
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60", + "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60", + })) + } + return checkAggr( - []string{"project_id"}, + []string{"project_id", "replication_specs.0.id", "replication_specs.1.id"}, map[string]string{ "name": name, + "disk_size_gb": "60", "replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone), "replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone), + }, + additionalChecks..., + ) +} + +func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name string, diskSizeGB int) string { + return fmt.Sprintf(` + resource "mongodbatlas_project" "cluster_project" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.cluster_project.id + name = %[3]q + backup_enabled = false + mongo_db_major_version = "7.0" + cluster_type = "SHARDED" + + replication_specs { + num_shards = 2 + + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + disk_size_gb = %[4]d + } + analytics_specs { + instance_size = "M10" + node_count = 0 + disk_size_gb = %[4]d + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + } + + data "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + name = mongodbatlas_advanced_cluster.test.name + } + `, orgID, projectName, name, diskSizeGB) +} + +func checkShardedOldSchemaDiskSizeGBElectableLevel(diskSizeGB int) resource.TestCheckFunc { + return checkAggr( + []string{}, + map[string]string{ + "replication_specs.0.num_shards": "2", + "disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + }) +} + +func configShardedNewSchema(orgID, projectName, name string, diskSizeGB int, firstInstanceSize, lastInstanceSize string, firstDiskIOPS, lastDiskIOPS *int, includeMiddleSpec bool) string { + var thirdReplicationSpec string + if includeMiddleSpec { + thirdReplicationSpec = fmt.Sprintf(` + replication_specs { + region_configs { + electable_specs { + instance_size = %[1]q + node_count = 3 + disk_size_gb = %[2]d + } + analytics_specs { + instance_size = %[1]q + node_count = 1 + disk_size_gb = %[2]d + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + `, firstInstanceSize, diskSizeGB) + } + var firstDiskIOPSAttrs string + if firstDiskIOPS != nil { + firstDiskIOPSAttrs = fmt.Sprintf(` + disk_iops = %d + ebs_volume_type = "PROVISIONED" + `, *firstDiskIOPS) + } + var lastDiskIOPSAttrs string + if lastDiskIOPS != nil { + lastDiskIOPSAttrs = fmt.Sprintf(` + disk_iops = %d + ebs_volume_type = "PROVISIONED" + `, *lastDiskIOPS) + } + return fmt.Sprintf(` + resource "mongodbatlas_project" "cluster_project" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.cluster_project.id + name = %[3]q + backup_enabled = false + cluster_type = "SHARDED" + + replication_specs { + region_configs { + electable_specs { + instance_size = %[4]q + node_count = 3 + disk_size_gb = %[9]d + %[6]s + } + analytics_specs { + instance_size = %[4]q + node_count = 1 + disk_size_gb = %[9]d + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + + %[8]s + + replication_specs { + region_configs { + 
electable_specs { + instance_size = %[5]q + node_count = 3 + disk_size_gb = %[9]d + %[7]s + } + analytics_specs { + instance_size = %[5]q + node_count = 1 + disk_size_gb = %[9]d + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + } + + data "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + name = mongodbatlas_advanced_cluster.test.name + use_replication_spec_per_shard = true + } + + data "mongodbatlas_advanced_clusters" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + use_replication_spec_per_shard = true + } + `, orgID, projectName, name, firstInstanceSize, lastInstanceSize, firstDiskIOPSAttrs, lastDiskIOPSAttrs, thirdReplicationSpec, diskSizeGB) +} + +func checkShardedNewSchema(diskSizeGB int, firstInstanceSize, lastInstanceSize string, firstDiskIops, lastDiskIops *int, isAsymmetricCluster, includeMiddleSpec bool) resource.TestCheckFunc { + amtOfReplicationSpecs := 2 + if includeMiddleSpec { + amtOfReplicationSpecs = 3 + } + + lastSpecIndex := 1 + if includeMiddleSpec { + lastSpecIndex = 2 + } + + clusterChecks := map[string]string{ + "disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + "replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs), + "replication_specs.0.region_configs.0.electable_specs.0.instance_size": firstInstanceSize, + fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.instance_size", lastSpecIndex): lastInstanceSize, + "replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.disk_size_gb", lastSpecIndex): fmt.Sprintf("%d", diskSizeGB), + "replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": fmt.Sprintf("%d", diskSizeGB), + fmt.Sprintf("replication_specs.%d.region_configs.0.analytics_specs.0.disk_size_gb", lastSpecIndex): fmt.Sprintf("%d", diskSizeGB), + } + if firstDiskIops != nil { + clusterChecks["replication_specs.0.region_configs.0.electable_specs.0.disk_iops"] = fmt.Sprintf("%d", *firstDiskIops) + } + if lastDiskIops != nil { + clusterChecks[fmt.Sprintf("replication_specs.%d.region_configs.0.electable_specs.0.disk_iops", lastSpecIndex)] = fmt.Sprintf("%d", *lastDiskIops) + } + + // plural data source checks + additionalChecks := acc.AddAttrSetChecks(dataSourcePluralName, nil, + []string{"results.#", "results.0.replication_specs.#", "results.0.replication_specs.0.region_configs.#", "results.0.name", "results.0.termination_protection_enabled", "results.0.global_cluster_self_managed_sharding"}...) 
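+    // Clarifying comment (assumed behavior of the acc test helper, not stated in the original change):
+    // AddAttrChecksPrefix is understood to reuse the same clusterChecks expectations against the plural
+    // data source by rewriting each key under the "results.0." prefix, e.g. "replication_specs.#"
+    // becomes "results.0.replication_specs.#".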
+ additionalChecks = acc.AddAttrChecksPrefix(dataSourcePluralName, additionalChecks, clusterChecks, "results.0") + + // expected id attribute only if cluster is symmetric + if isAsymmetricCluster { + additionalChecks = append(additionalChecks, checkAggr([]string{}, map[string]string{ + "replication_specs.0.id": "", + "replication_specs.1.id": "", + })) + additionalChecks = acc.AddAttrChecks(dataSourcePluralName, additionalChecks, map[string]string{ + "results.0.replication_specs.0.id": "", + "results.0.replication_specs.1.id": "", }) + } else { + additionalChecks = append(additionalChecks, checkAggr([]string{"replication_specs.0.id", "replication_specs.1.id"}, map[string]string{})) + additionalChecks = acc.AddAttrSetChecks(dataSourcePluralName, additionalChecks, "results.0.replication_specs.0.id", "results.0.replication_specs.1.id") + } + + return checkAggr( + []string{"replication_specs.0.external_id", "replication_specs.0.zone_id", "replication_specs.1.external_id", "replication_specs.1.zone_id"}, + clusterChecks, + additionalChecks..., + ) +} + +func configGeoShardedNewSchema(orgID, projectName, name string, includeThirdShardInFirstZone bool) string { + var thirdReplicationSpec string + if includeThirdShardInFirstZone { + thirdReplicationSpec = ` + replication_specs { + zone_name = "zone n1" + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + ` + } + return fmt.Sprintf(` + resource "mongodbatlas_project" "cluster_project" { + org_id = %[1]q + name = %[2]q + } + resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.cluster_project.id + name = %[3]q + backup_enabled = false + mongo_db_major_version = "7.0" + cluster_type = "GEOSHARDED" + replication_specs { + zone_name = "zone n1" + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + %[4]s + replication_specs { + zone_name = "zone n2" + region_configs { + electable_specs { + instance_size = "M20" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + } + data "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + name = mongodbatlas_advanced_cluster.test.name + use_replication_spec_per_shard = true + } + data "mongodbatlas_advanced_clusters" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + use_replication_spec_per_shard = true + } + `, orgID, projectName, name, thirdReplicationSpec) +} + +func checkGeoShardedNewSchema(includeThirdShardInFirstZone bool) resource.TestCheckFunc { + var amtOfReplicationSpecs int + if includeThirdShardInFirstZone { + amtOfReplicationSpecs = 3 + } else { + amtOfReplicationSpecs = 2 + } + clusterChecks := map[string]string{ + "replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs), + } + + return checkAggr( + []string{}, + clusterChecks, + ) +} + +func configShardedTransitionOldToNewSchema(orgID, projectName, name string, useNewSchema bool) string { + var numShardsStr string + if !useNewSchema { + numShardsStr = `num_shards = 2` + } + replicationSpec := fmt.Sprintf(` + replication_specs { + %[1]s + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + analytics_specs { + instance_size = "M10" + node_count = 1 + } + provider_name = "AWS" + priority = 7 + region_name = "EU_WEST_1" + } + } + `, numShardsStr) + + var 
replicationSpecs string + if useNewSchema { + replicationSpecs = fmt.Sprintf(` + %[1]s + %[1]s + `, replicationSpec) + } else { + replicationSpecs = replicationSpec + } + + var dataSourceFlag string + if useNewSchema { + dataSourceFlag = `use_replication_spec_per_shard = true` + } + + return fmt.Sprintf(` + resource "mongodbatlas_project" "cluster_project" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.cluster_project.id + name = %[3]q + backup_enabled = false + cluster_type = "SHARDED" + + %[4]s + } + + data "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + name = mongodbatlas_advanced_cluster.test.name + %[5]s + } + + data "mongodbatlas_advanced_clusters" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + %[5]s + } + `, orgID, projectName, name, replicationSpecs, dataSourceFlag) +} + +func checkShardedTransitionOldToNewSchema(useNewSchema bool) resource.TestCheckFunc { + var amtOfReplicationSpecs int + if useNewSchema { + amtOfReplicationSpecs = 2 + } else { + amtOfReplicationSpecs = 1 + } + var checksForNewSchema []resource.TestCheckFunc + if useNewSchema { + checksForNewSchema = []resource.TestCheckFunc{ + checkAggr([]string{"replication_specs.1.id", "replication_specs.0.external_id", "replication_specs.1.external_id"}, + map[string]string{ + "replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs), + "replication_specs.1.region_configs.0.electable_specs.0.instance_size": "M10", + "replication_specs.1.region_configs.0.analytics_specs.0.instance_size": "M10", + }), + } + } + + return checkAggr( + []string{"replication_specs.0.id"}, + map[string]string{ + "replication_specs.#": fmt.Sprintf("%d", amtOfReplicationSpecs), + "replication_specs.0.region_configs.0.electable_specs.0.instance_size": "M10", + "replication_specs.0.region_configs.0.analytics_specs.0.instance_size": "M10", + }, + checksForNewSchema..., + ) +} + +func configGeoShardedTransitionOldToNewSchema(orgID, projectName, name string, useNewSchema bool) string { + var numShardsStr string + if !useNewSchema { + numShardsStr = `num_shards = 2` + } + replicationSpec := ` + replication_specs { + %[1]s + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + analytics_specs { + instance_size = "M10" + node_count = 1 + } + provider_name = "AWS" + priority = 7 + region_name = %[2]q + } + zone_name = %[3]q + } + ` + + var replicationSpecs string + if !useNewSchema { + replicationSpecs = fmt.Sprintf(` + %[1]s + %[2]s + `, fmt.Sprintf(replicationSpec, numShardsStr, "US_EAST_1", "zone 1"), fmt.Sprintf(replicationSpec, numShardsStr, "EU_WEST_1", "zone 2")) + } else { + replicationSpecs = fmt.Sprintf(` + %[1]s + %[2]s + %[3]s + %[4]s + `, fmt.Sprintf(replicationSpec, numShardsStr, "US_EAST_1", "zone 1"), fmt.Sprintf(replicationSpec, numShardsStr, "US_EAST_1", "zone 1"), + fmt.Sprintf(replicationSpec, numShardsStr, "EU_WEST_1", "zone 2"), fmt.Sprintf(replicationSpec, numShardsStr, "EU_WEST_1", "zone 2")) + } + + var dataSourceFlag string + if useNewSchema { + dataSourceFlag = `use_replication_spec_per_shard = true` + } + + return fmt.Sprintf(` + resource "mongodbatlas_project" "cluster_project" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.cluster_project.id + name = %[3]q + backup_enabled = false + cluster_type = "GEOSHARDED" + + %[4]s + } + + data 
"mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + name = mongodbatlas_advanced_cluster.test.name + %[5]s + } + + data "mongodbatlas_advanced_clusters" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + %[5]s + } + `, orgID, projectName, name, replicationSpecs, dataSourceFlag) +} + +func checkGeoShardedTransitionOldToNewSchema(useNewSchema bool) resource.TestCheckFunc { + if useNewSchema { + return checkAggr( + []string{"replication_specs.0.id", "replication_specs.1.id", "replication_specs.2.id", "replication_specs.3.id", + "replication_specs.0.external_id", "replication_specs.1.external_id", "replication_specs.2.external_id", "replication_specs.3.external_id", + }, + map[string]string{ + "replication_specs.#": "4", + "replication_specs.0.zone_name": "zone 1", + "replication_specs.1.zone_name": "zone 1", + "replication_specs.2.zone_name": "zone 2", + "replication_specs.3.zone_name": "zone 2", + }, + ) + } + return checkAggr( + []string{"replication_specs.0.id", "replication_specs.1.id"}, + map[string]string{ + "replication_specs.#": "2", + "replication_specs.0.zone_name": "zone 1", + "replication_specs.1.zone_name": "zone 2", + }, + ) } diff --git a/internal/service/advancedcluster/resource_update_logic.go b/internal/service/advancedcluster/resource_update_logic.go new file mode 100644 index 0000000000..d518494042 --- /dev/null +++ b/internal/service/advancedcluster/resource_update_logic.go @@ -0,0 +1,100 @@ +package advancedcluster + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "go.mongodb.org/atlas-sdk/v20240805003/admin" +) + +func noIDsPopulatedInReplicationSpecs(replicationSpecs *[]admin.ReplicationSpec20240805) bool { + if replicationSpecs == nil || len(*replicationSpecs) == 0 { + return false + } + for _, spec := range *replicationSpecs { + if conversion.IsStringPresent(spec.Id) { + return false + } + } + return true +} + +func populateIDValuesUsingNewAPI(ctx context.Context, projectID, clusterName string, connV2ClusterAPI admin.ClustersApi, replicationSpecs *[]admin.ReplicationSpec20240805) (*[]admin.ReplicationSpec20240805, diag.Diagnostics) { + if replicationSpecs == nil || len(*replicationSpecs) == 0 { + return replicationSpecs, nil + } + cluster, _, err := connV2ClusterAPI.GetCluster(ctx, projectID, clusterName).Execute() + if err != nil { + return nil, diag.FromErr(fmt.Errorf(errorRead, clusterName, err)) + } + + zoneToReplicationSpecsIDs := groupIDsByZone(cluster.GetReplicationSpecs()) + result := AddIDsToReplicationSpecs(*replicationSpecs, zoneToReplicationSpecsIDs) + return &result, nil +} + +func AddIDsToReplicationSpecs(replicationSpecs []admin.ReplicationSpec20240805, zoneToReplicationSpecsIDs map[string][]string) []admin.ReplicationSpec20240805 { + for zoneName, availableIDs := range zoneToReplicationSpecsIDs { + var indexOfIDToUse = 0 + for i := range replicationSpecs { + if indexOfIDToUse >= len(availableIDs) { + break // all available ids for this zone have been used + } + if replicationSpecs[i].GetZoneName() == zoneName { + newID := availableIDs[indexOfIDToUse] + indexOfIDToUse++ + replicationSpecs[i].Id = &newID + } + } + } + return replicationSpecs +} + +func groupIDsByZone(specs []admin.ReplicationSpec20240805) map[string][]string { + result := make(map[string][]string) + for _, spec := range specs { + result[spec.GetZoneName()] = append(result[spec.GetZoneName()], 
spec.GetId()) + } + return result +} + +// Having the following considerations: +// - Existing replication specs can have the autoscaling values present in the state with default values even if not defined in the config (case when cluster is imported) +// - API expects autoScaling and analyticsAutoScaling aligned cross all region configs in the PATCH request +// This function is needed to avoid errors if a new replication spec is added, ensuring the PATCH request will have the auto scaling aligned with other replication specs when not present in config. +func SyncAutoScalingConfigs(replicationSpecs *[]admin.ReplicationSpec20240805) { + if replicationSpecs == nil || len(*replicationSpecs) == 0 { + return + } + + var defaultAnalyticsAutoScaling, defaultAutoScaling *admin.AdvancedAutoScalingSettings + + for _, spec := range *replicationSpecs { + for i := range *spec.RegionConfigs { + regionConfig := &(*spec.RegionConfigs)[i] + if regionConfig.AutoScaling != nil && defaultAutoScaling == nil { + defaultAutoScaling = regionConfig.AutoScaling + } + if regionConfig.AnalyticsAutoScaling != nil && defaultAnalyticsAutoScaling == nil { + defaultAnalyticsAutoScaling = regionConfig.AnalyticsAutoScaling + } + } + } + applyDefaultAutoScaling(replicationSpecs, defaultAutoScaling, defaultAnalyticsAutoScaling) +} + +func applyDefaultAutoScaling(replicationSpecs *[]admin.ReplicationSpec20240805, defaultAutoScaling, defaultAnalyticsAutoScaling *admin.AdvancedAutoScalingSettings) { + for _, spec := range *replicationSpecs { + for i := range *spec.RegionConfigs { + regionConfig := &(*spec.RegionConfigs)[i] + if regionConfig.AutoScaling == nil && defaultAutoScaling != nil { + regionConfig.AutoScaling = defaultAutoScaling + } + if regionConfig.AnalyticsAutoScaling == nil && defaultAnalyticsAutoScaling != nil { + regionConfig.AnalyticsAutoScaling = defaultAnalyticsAutoScaling + } + } + } +} diff --git a/internal/service/advancedcluster/resource_update_logic_test.go b/internal/service/advancedcluster/resource_update_logic_test.go new file mode 100644 index 0000000000..f9986b8b13 --- /dev/null +++ b/internal/service/advancedcluster/resource_update_logic_test.go @@ -0,0 +1,292 @@ +package advancedcluster_test + +import ( + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" + "github.com/stretchr/testify/assert" + "go.mongodb.org/atlas-sdk/v20240805003/admin" +) + +func TestAddIDsToReplicationSpecs(t *testing.T) { + testCases := map[string]struct { + ReplicationSpecs []admin.ReplicationSpec20240805 + ZoneToReplicationSpecsIDs map[string][]string + ExpectedReplicationSpecs []admin.ReplicationSpec20240805 + }{ + "two zones with same amount of available ids and replication specs to populate": { + ReplicationSpecs: []admin.ReplicationSpec20240805{ + { + ZoneName: admin.PtrString("Zone 1"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + }, + { + ZoneName: admin.PtrString("Zone 1"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + }, + }, + ZoneToReplicationSpecsIDs: map[string][]string{ + "Zone 1": {"zone1-id1", "zone1-id2"}, + "Zone 2": {"zone2-id1", "zone2-id2"}, + }, + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ + { + ZoneName: admin.PtrString("Zone 1"), + Id: admin.PtrString("zone1-id1"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + Id: admin.PtrString("zone2-id1"), + }, + { + ZoneName: admin.PtrString("Zone 1"), + Id: admin.PtrString("zone1-id2"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + Id: admin.PtrString("zone2-id2"), + }, + }, + }, + 
"less available ids than replication specs to populate": { + ReplicationSpecs: []admin.ReplicationSpec20240805{ + { + ZoneName: admin.PtrString("Zone 1"), + }, + { + ZoneName: admin.PtrString("Zone 1"), + }, + { + ZoneName: admin.PtrString("Zone 1"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + }, + }, + ZoneToReplicationSpecsIDs: map[string][]string{ + "Zone 1": {"zone1-id1"}, + "Zone 2": {"zone2-id1"}, + }, + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ + { + ZoneName: admin.PtrString("Zone 1"), + Id: admin.PtrString("zone1-id1"), + }, + { + ZoneName: admin.PtrString("Zone 1"), + Id: nil, + }, + { + ZoneName: admin.PtrString("Zone 1"), + Id: nil, + }, + { + ZoneName: admin.PtrString("Zone 2"), + Id: admin.PtrString("zone2-id1"), + }, + }, + }, + "more available ids than replication specs to populate": { + ReplicationSpecs: []admin.ReplicationSpec20240805{ + { + ZoneName: admin.PtrString("Zone 1"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + }, + }, + ZoneToReplicationSpecsIDs: map[string][]string{ + "Zone 1": {"zone1-id1", "zone1-id2"}, + "Zone 2": {"zone2-id1", "zone2-id2"}, + }, + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ + { + ZoneName: admin.PtrString("Zone 1"), + Id: admin.PtrString("zone1-id1"), + }, + { + ZoneName: admin.PtrString("Zone 2"), + Id: admin.PtrString("zone2-id1"), + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + resultSpecs := advancedcluster.AddIDsToReplicationSpecs(tc.ReplicationSpecs, tc.ZoneToReplicationSpecsIDs) + assert.Equal(t, tc.ExpectedReplicationSpecs, resultSpecs) + }) + } +} + +func TestSyncAutoScalingConfigs(t *testing.T) { + testCases := map[string]struct { + ReplicationSpecs []admin.ReplicationSpec20240805 + ExpectedReplicationSpecs []admin.ReplicationSpec20240805 + }{ + "apply same autoscaling options for new replication spec which does not have autoscaling defined": { + ReplicationSpecs: []admin.ReplicationSpec20240805{ + { + Id: admin.PtrString("id-1"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + }, + }, + }, + { + Id: admin.PtrString("id-2"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: nil, + AnalyticsAutoScaling: nil, + }, + }, + }, + }, + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ + { + Id: admin.PtrString("id-1"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + }, + }, + }, + { + Id: admin.PtrString("id-2"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: 
admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + }, + }, + }, + }, + }, + // for this case the API will respond with an error and guide the user to align autoscaling options cross all nodes + "when different autoscaling options are defined values will not be changed": { + ReplicationSpecs: []admin.ReplicationSpec20240805{ + { + Id: admin.PtrString("id-1"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(true), + ScaleDownEnabled: admin.PtrBool(true), + }, + }, + }, + }, + }, + { + Id: admin.PtrString("id-2"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(true), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + }, + }, + }, + }, + }, + }, + ExpectedReplicationSpecs: []admin.ReplicationSpec20240805{ + { + Id: admin.PtrString("id-1"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + ScaleDownEnabled: admin.PtrBool(false), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(true), + ScaleDownEnabled: admin.PtrBool(true), + }, + }, + }, + }, + }, + { + Id: admin.PtrString("id-2"), + RegionConfigs: &[]admin.CloudRegionConfig20240805{ + { + AutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(true), + }, + }, + AnalyticsAutoScaling: &admin.AdvancedAutoScalingSettings{ + Compute: &admin.AdvancedComputeAutoScaling{ + Enabled: admin.PtrBool(false), + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + specs := &tc.ReplicationSpecs + advancedcluster.SyncAutoScalingConfigs(specs) + assert.Equal(t, tc.ExpectedReplicationSpecs, *specs) + }) + } +} diff --git a/internal/service/alertconfiguration/data_source_alert_configuration.go b/internal/service/alertconfiguration/data_source_alert_configuration.go index 8909a10c18..182fcf0099 100644 --- a/internal/service/alertconfiguration/data_source_alert_configuration.go +++ b/internal/service/alertconfiguration/data_source_alert_configuration.go @@ -14,7 +14,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/zclconf/go-cty/cty" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var _ datasource.DataSource = &alertConfigurationDS{} diff --git a/internal/service/alertconfiguration/data_source_alert_configurations.go b/internal/service/alertconfiguration/data_source_alert_configurations.go index f3ab30d2bf..2161f7979c 100644 --- a/internal/service/alertconfiguration/data_source_alert_configurations.go +++ b/internal/service/alertconfiguration/data_source_alert_configurations.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const alertConfigurationsDataSourceName = "alert_configurations" diff --git a/internal/service/alertconfiguration/model_alert_configuration.go b/internal/service/alertconfiguration/model_alert_configuration.go index 2c7e6571e5..4f7fca83cd 100644 --- a/internal/service/alertconfiguration/model_alert_configuration.go +++ b/internal/service/alertconfiguration/model_alert_configuration.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewNotificationList(list []TfNotificationModel) (*[]admin.AlertsNotificationRootForGroup, error) { diff --git a/internal/service/alertconfiguration/model_alert_configuration_test.go b/internal/service/alertconfiguration/model_alert_configuration_test.go index ac63c13e83..0d8704b59f 100644 --- a/internal/service/alertconfiguration/model_alert_configuration_test.go +++ b/internal/service/alertconfiguration/model_alert_configuration_test.go @@ -7,7 +7,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/alertconfiguration" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/alertconfiguration/resource_alert_configuration.go b/internal/service/alertconfiguration/resource_alert_configuration.go index 24080129b6..de1cd6e7c0 100644 --- a/internal/service/alertconfiguration/resource_alert_configuration.go +++ b/internal/service/alertconfiguration/resource_alert_configuration.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/apikey/data_source_api_keys.go b/internal/service/apikey/data_source_api_keys.go index 19744e8f27..4e5cd62812 100644 --- a/internal/service/apikey/data_source_api_keys.go +++ b/internal/service/apikey/data_source_api_keys.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/apikey/resource_api_key.go b/internal/service/apikey/resource_api_key.go index f6731c64d4..c3262f870f 100644 --- a/internal/service/apikey/resource_api_key.go +++ b/internal/service/apikey/resource_api_key.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" 
) func Resource() *schema.Resource { diff --git a/internal/service/atlasuser/data_source_atlas_user.go b/internal/service/atlasuser/data_source_atlas_user.go index 7a662e55e4..27c83e51a7 100644 --- a/internal/service/atlasuser/data_source_atlas_user.go +++ b/internal/service/atlasuser/data_source_atlas_user.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/atlasuser/data_source_atlas_user_test.go b/internal/service/atlasuser/data_source_atlas_user_test.go index 56ace2bf2d..aaa6f397a2 100644 --- a/internal/service/atlasuser/data_source_atlas_user_test.go +++ b/internal/service/atlasuser/data_source_atlas_user_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestAccConfigDSAtlasUser_ByUserID(t *testing.T) { diff --git a/internal/service/atlasuser/data_source_atlas_users.go b/internal/service/atlasuser/data_source_atlas_users.go index e036e942b3..a5c8977d51 100644 --- a/internal/service/atlasuser/data_source_atlas_users.go +++ b/internal/service/atlasuser/data_source_atlas_users.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/atlasuser/data_source_atlas_users_test.go b/internal/service/atlasuser/data_source_atlas_users_test.go index af0baf55c4..5a5ba0f55a 100644 --- a/internal/service/atlasuser/data_source_atlas_users_test.go +++ b/internal/service/atlasuser/data_source_atlas_users_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/atlasuser" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestAccConfigDSAtlasUsers_ByOrgID(t *testing.T) { diff --git a/internal/service/auditing/resource_auditing.go b/internal/service/auditing/resource_auditing.go index 91cffaf374..3aa3feca46 100644 --- a/internal/service/auditing/resource_auditing.go +++ b/internal/service/auditing/resource_auditing.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go index 39448e2024..c63ed964d5 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go +++ 
b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy.go @@ -8,7 +8,7 @@ import ( "net/http" "strings" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go index 76b726cc40..809d939922 100644 --- a/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/data_source_cloud_backup_schedule.go @@ -7,7 +7,12 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cluster" + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" +) + +const ( + AsymmetricShardsUnsupportedActionDS = "Ensure you use copy_settings.#.zone_id instead of copy_settings.#.replication_spec_id for asymmetric sharded clusters by setting `use_zone_id_for_copy_settings = true`. To learn more, see our examples, documentation, and 1.18.0 migration guide at https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide.html.markdown" ) func DataSource() *schema.Resource { @@ -22,6 +27,10 @@ func DataSource() *schema.Resource { Type: schema.TypeString, Required: true, }, + "use_zone_id_for_copy_settings": { + Type: schema.TypeBool, + Optional: true, + }, "cluster_id": { Type: schema.TypeString, Computed: true, @@ -47,6 +56,11 @@ func DataSource() *schema.Resource { Computed: true, }, "replication_spec_id": { + Type: schema.TypeString, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, + "zone_id": { Type: schema.TypeString, Computed: true, }, @@ -247,73 +261,49 @@ func DataSource() *schema.Resource { func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) + useZoneIDForCopySettings := false - backupPolicy, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() - if err != nil { - return diag.Errorf(cluster.ErrorSnapshotBackupPolicyRead, clusterName, err) - } + var backupSchedule *admin.DiskBackupSnapshotSchedule20240805 + var backupScheduleOldSDK *admin20240530.DiskBackupSnapshotSchedule + var copySettings []map[string]any + var err error - if err := d.Set("cluster_id", backupPolicy.GetClusterId()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "cluster_id", clusterName, err) + if v, ok := d.GetOk("use_zone_id_for_copy_settings"); ok { + useZoneIDForCopySettings = v.(bool) } - if err := d.Set("reference_hour_of_day", backupPolicy.GetReferenceHourOfDay()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "reference_hour_of_day", clusterName, err) - } + if !useZoneIDForCopySettings { + backupScheduleOldSDK, _, err = connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + if err != nil { + if apiError, ok := admin20240530.AsError(err); ok && 
apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + return diag.Errorf("%s : %s : %s", errorSnapshotBackupScheduleRead, ErrorOperationNotPermitted, AsymmetricShardsUnsupportedActionDS) + } + return diag.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) + } - if err := d.Set("reference_minute_of_hour", backupPolicy.GetReferenceMinuteOfHour()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "reference_minute_of_hour", clusterName, err) + copySettings = flattenCopySettingsOldSDK(backupScheduleOldSDK.GetCopySettings()) + backupSchedule = convertBackupScheduleToLatestExcludeCopySettings(backupScheduleOldSDK) + } else { + backupSchedule, _, err = connV2.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() + if err != nil { + return diag.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) + } + copySettings = FlattenCopySettings(backupSchedule.GetCopySettings()) } - if err := d.Set("restore_window_days", backupPolicy.GetRestoreWindowDays()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "restore_window_days", clusterName, err) + diags := setSchemaFieldsExceptCopySettings(d, backupSchedule) + if diags.HasError() { + return diags } - if err := d.Set("next_snapshot", conversion.TimePtrToStringPtr(backupPolicy.NextSnapshot)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "next_snapshot", clusterName, err) - } - if err := d.Set("use_org_and_group_names_in_export_prefix", backupPolicy.GetUseOrgAndGroupNamesInExportPrefix()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "use_org_and_group_names_in_export_prefix", clusterName, err) - } - if err := d.Set("auto_export_enabled", backupPolicy.GetAutoExportEnabled()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "auto_export_enabled", clusterName, err) - } - if err := d.Set("id_policy", backupPolicy.GetPolicies()[0].GetId()); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "id_policy", clusterName, err) - } - if err := d.Set("export", flattenExport(backupPolicy)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "auto_export_enabled", clusterName, err) - } - if err := d.Set("policy_item_hourly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Hourly)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_hourly", clusterName, err) - } - - if err := d.Set("policy_item_daily", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Daily)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_daily", clusterName, err) - } - - if err := d.Set("policy_item_weekly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Weekly)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_weekly", clusterName, err) - } - - if err := d.Set("policy_item_monthly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Monthly)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_monthly", clusterName, err) - } - - if err := d.Set("policy_item_yearly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Yearly)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_yearly", clusterName, err) - } - - if err := d.Set("copy_settings", flattenCopySettings(backupPolicy.GetCopySettings())); err != nil { + if err := d.Set("copy_settings", 
copySettings); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "copy_settings", clusterName, err) } - if err := d.Set("export", flattenExport(backupPolicy)); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "export", clusterName, err) - } d.SetId(conversion.EncodeStateID(map[string]string{ "project_id": projectID, "cluster_name": clusterName, diff --git a/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go new file mode 100644 index 0000000000..b7013bda55 --- /dev/null +++ b/internal/service/cloudbackupschedule/model_cloud_backup_schedule.go @@ -0,0 +1,62 @@ +package cloudbackupschedule + +import ( + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" +) + +func FlattenPolicyItem(items []admin.DiskBackupApiPolicyItem, frequencyType string) []map[string]any { + policyItems := make([]map[string]any, 0) + for _, v := range items { + if frequencyType == v.GetFrequencyType() { + policyItems = append(policyItems, map[string]any{ + "id": v.GetId(), + "frequency_interval": v.GetFrequencyInterval(), + "frequency_type": v.GetFrequencyType(), + "retention_unit": v.GetRetentionUnit(), + "retention_value": v.GetRetentionValue(), + }) + } + } + return policyItems +} + +func FlattenExport(roles *admin.DiskBackupSnapshotSchedule20240805) []map[string]any { + exportList := make([]map[string]any, 0) + emptyStruct := admin.DiskBackupSnapshotSchedule20240805{} + if emptyStruct.GetExport() != roles.GetExport() { + exportList = append(exportList, map[string]any{ + "frequency_type": roles.Export.GetFrequencyType(), + "export_bucket_id": roles.Export.GetExportBucketId(), + }) + } + return exportList +} + +func flattenCopySettingsOldSDK(copySettingList []admin20240530.DiskBackupCopySetting) []map[string]any { + copySettings := make([]map[string]any, 0) + for _, v := range copySettingList { + copySettings = append(copySettings, map[string]any{ + "cloud_provider": v.GetCloudProvider(), + "frequencies": v.GetFrequencies(), + "region_name": v.GetRegionName(), + "replication_spec_id": v.GetReplicationSpecId(), + "should_copy_oplogs": v.GetShouldCopyOplogs(), + }) + } + return copySettings +} + +func FlattenCopySettings(copySettingList []admin.DiskBackupCopySetting20240805) []map[string]any { + copySettings := make([]map[string]any, 0) + for _, v := range copySettingList { + copySettings = append(copySettings, map[string]any{ + "cloud_provider": v.GetCloudProvider(), + "frequencies": v.GetFrequencies(), + "region_name": v.GetRegionName(), + "zone_id": v.GetZoneId(), + "should_copy_oplogs": v.GetShouldCopyOplogs(), + }) + } + return copySettings +} diff --git a/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go new file mode 100644 index 0000000000..e573361dee --- /dev/null +++ b/internal/service/cloudbackupschedule/model_cloud_backup_schedule_test.go @@ -0,0 +1,164 @@ +package cloudbackupschedule_test + +import ( + "reflect" + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupschedule" + "go.mongodb.org/atlas-sdk/v20240805003/admin" +) + +func TestFlattenPolicyItem(t *testing.T) { + testCases := []struct { + name string + items []admin.DiskBackupApiPolicyItem + frequencyType string + expected []map[string]any + }{ + { + 
name: "Matching Frequency Type", + items: []admin.DiskBackupApiPolicyItem{ + {Id: conversion.StringPtr("1"), FrequencyType: "daily", FrequencyInterval: 1, RetentionUnit: "days", RetentionValue: 30}, + {Id: conversion.StringPtr("2"), FrequencyType: "weekly", FrequencyInterval: 1, RetentionUnit: "weeks", RetentionValue: 52}, + {Id: conversion.StringPtr("3"), FrequencyType: "daily", FrequencyInterval: 2, RetentionUnit: "days", RetentionValue: 60}, + }, + frequencyType: "daily", + expected: []map[string]any{ + {"id": "1", "frequency_interval": 1, "frequency_type": "daily", "retention_unit": "days", "retention_value": 30}, + {"id": "3", "frequency_interval": 2, "frequency_type": "daily", "retention_unit": "days", "retention_value": 60}, + }, + }, + { + name: "No Matching Frequency Type", + items: []admin.DiskBackupApiPolicyItem{ + {Id: conversion.StringPtr("1"), FrequencyType: "weekly", FrequencyInterval: 1, RetentionUnit: "weeks", RetentionValue: 52}, + {Id: conversion.StringPtr("2"), FrequencyType: "monthly", FrequencyInterval: 1, RetentionUnit: "months", RetentionValue: 12}, + }, + frequencyType: "daily", + expected: []map[string]any{}, + }, + { + name: "Empty input", + items: []admin.DiskBackupApiPolicyItem{}, + frequencyType: "daily", + expected: []map[string]any{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := cloudbackupschedule.FlattenPolicyItem(tc.items, tc.frequencyType) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Test %s failed: expected %+v, got %+v", tc.name, tc.expected, result) + } + }) + } +} + +func TestFlattenExport(t *testing.T) { + testCases := []struct { + name string + roles *admin.DiskBackupSnapshotSchedule20240805 + expected []map[string]any + }{ + { + name: "Non-empty Export", + roles: &admin.DiskBackupSnapshotSchedule20240805{ + Export: &admin.AutoExportPolicy{ + FrequencyType: conversion.StringPtr("daily"), + ExportBucketId: conversion.StringPtr("bucket123"), + }, + }, + expected: []map[string]any{ + {"frequency_type": "daily", "export_bucket_id": "bucket123"}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := cloudbackupschedule.FlattenExport(tc.roles) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Test %s failed: expected %+v, got %+v", tc.name, tc.expected, result) + } + }) + } +} + +func TestFlattenCopySettings(t *testing.T) { + testCases := []struct { + name string + settings []admin.DiskBackupCopySetting20240805 + expected []map[string]any + }{ + { + name: "Multiple Copy Settings", + settings: []admin.DiskBackupCopySetting20240805{ + { + CloudProvider: conversion.StringPtr("AWS"), + Frequencies: &[]string{"daily", "weekly"}, + RegionName: conversion.StringPtr("US_WEST_1"), + ZoneId: "12345", + ShouldCopyOplogs: conversion.Pointer(true), + }, + { + CloudProvider: conversion.StringPtr("Azure"), + Frequencies: &[]string{"monthly"}, + RegionName: conversion.StringPtr("EAST_US"), + ZoneId: "67895", + ShouldCopyOplogs: conversion.Pointer(false), + }, + }, + expected: []map[string]any{ + {"cloud_provider": "AWS", "frequencies": []string{"daily", "weekly"}, "region_name": "US_WEST_1", "zone_id": "12345", "should_copy_oplogs": true}, + {"cloud_provider": "Azure", "frequencies": []string{"monthly"}, "region_name": "EAST_US", "zone_id": "67895", "should_copy_oplogs": false}, + }, + }, + { + name: "Empty Copy Settings List", + settings: []admin.DiskBackupCopySetting20240805{}, + expected: []map[string]any{}, + }, + } + + for _, tc := range testCases { 
+ t.Run(tc.name, func(t *testing.T) { + result := cloudbackupschedule.FlattenCopySettings(tc.settings) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Test %s failed: expected %+v, got %+v", tc.name, tc.expected, result) + } + }) + } +} + +func TestExpandPolicyItems(t *testing.T) { + testCases := []struct { + expected *[]admin.DiskBackupApiPolicyItem + name string + frequencyType string + items []any + }{ + { + name: "Valid Input", + items: []any{ + map[string]any{"id": "123", "retention_unit": "days", "retention_value": 30, "frequency_interval": 1}, + map[string]any{"id": "456", "retention_unit": "weeks", "retention_value": 52, "frequency_interval": 1}, + }, + frequencyType: "monthly", + expected: &[]admin.DiskBackupApiPolicyItem{ + {Id: conversion.StringPtr("123"), RetentionUnit: "days", RetentionValue: 30, FrequencyInterval: 1, FrequencyType: "monthly"}, + {Id: conversion.StringPtr("456"), RetentionUnit: "weeks", RetentionValue: 52, FrequencyInterval: 1, FrequencyType: "monthly"}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := cloudbackupschedule.ExpandPolicyItems(tc.items, tc.frequencyType) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Test %s failed: expected %+v, got %+v", tc.name, *tc.expected, *result) + } + }) + } +} diff --git a/internal/service/cloudbackupschedule/model_sdk_version_conversion.go b/internal/service/cloudbackupschedule/model_sdk_version_conversion.go new file mode 100644 index 0000000000..5e59dd3c3b --- /dev/null +++ b/internal/service/cloudbackupschedule/model_sdk_version_conversion.go @@ -0,0 +1,116 @@ +package cloudbackupschedule + +import ( + admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" +) + +// Conversions from one SDK model version to another are used to avoid duplicating our flatten/expand conversion functions. +// - These functions must not contain any business logic. +// - All will be removed once we rely on a single API version. 
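+// The converters below are plain field-by-field copies between structurally identical types of the two
+// SDK versions (for example, admin.DiskBackupApiPolicyItem and admin20240530.DiskBackupApiPolicyItem);
+// values are passed through unchanged.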
+ +func convertPolicyItemsToOldSDK(slice *[]admin.DiskBackupApiPolicyItem) []admin20240530.DiskBackupApiPolicyItem { + if slice == nil { + return nil + } + policyItemsSlice := *slice + results := make([]admin20240530.DiskBackupApiPolicyItem, len(policyItemsSlice)) + for i := range len(policyItemsSlice) { + policyItem := policyItemsSlice[i] + results[i] = admin20240530.DiskBackupApiPolicyItem{ + FrequencyInterval: policyItem.FrequencyInterval, + FrequencyType: policyItem.FrequencyType, + Id: policyItem.Id, + RetentionUnit: policyItem.RetentionUnit, + RetentionValue: policyItem.RetentionValue, + } + } + return results +} + +func convertPoliciesToLatest(slice *[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin.AdvancedDiskBackupSnapshotSchedulePolicy { + if slice == nil { + return nil + } + + policySlice := *slice + results := make([]admin.AdvancedDiskBackupSnapshotSchedulePolicy, len(policySlice)) + for i := range len(policySlice) { + policyItem := policySlice[i] + results[i] = admin.AdvancedDiskBackupSnapshotSchedulePolicy{ + Id: policyItem.Id, + PolicyItems: convertPolicyItemsToLatest(policyItem.PolicyItems), + } + } + return &results +} + +func convertPolicyItemsToLatest(slice *[]admin20240530.DiskBackupApiPolicyItem) *[]admin.DiskBackupApiPolicyItem { + if slice == nil { + return nil + } + policyItemsSlice := *slice + results := make([]admin.DiskBackupApiPolicyItem, len(policyItemsSlice)) + for i := range len(policyItemsSlice) { + policyItem := policyItemsSlice[i] + results[i] = admin.DiskBackupApiPolicyItem{ + FrequencyInterval: policyItem.FrequencyInterval, + FrequencyType: policyItem.FrequencyType, + Id: policyItem.Id, + RetentionUnit: policyItem.RetentionUnit, + RetentionValue: policyItem.RetentionValue, + } + } + return &results +} + +func convertAutoExportPolicyToOldSDK(exportPolicy *admin.AutoExportPolicy) *admin20240530.AutoExportPolicy { + if exportPolicy == nil { + return nil + } + + return &admin20240530.AutoExportPolicy{ + ExportBucketId: exportPolicy.ExportBucketId, + FrequencyType: exportPolicy.FrequencyType, + } +} + +func convertAutoExportPolicyToLatest(exportPolicy *admin20240530.AutoExportPolicy) *admin.AutoExportPolicy { + if exportPolicy == nil { + return nil + } + + return &admin.AutoExportPolicy{ + ExportBucketId: exportPolicy.ExportBucketId, + FrequencyType: exportPolicy.FrequencyType, + } +} + +func convertBackupScheduleReqToOldSDK(req *admin.DiskBackupSnapshotSchedule20240805, + copySettingsOldSDK *[]admin20240530.DiskBackupCopySetting, + policiesOldSDK *[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy) *admin20240530.DiskBackupSnapshotSchedule { + return &admin20240530.DiskBackupSnapshotSchedule{ + CopySettings: copySettingsOldSDK, + Policies: policiesOldSDK, + AutoExportEnabled: req.AutoExportEnabled, + Export: convertAutoExportPolicyToOldSDK(req.Export), + UseOrgAndGroupNamesInExportPrefix: req.UseOrgAndGroupNamesInExportPrefix, + ReferenceHourOfDay: req.ReferenceHourOfDay, + ReferenceMinuteOfHour: req.ReferenceMinuteOfHour, + RestoreWindowDays: req.RestoreWindowDays, + UpdateSnapshots: req.UpdateSnapshots, + } +} + +func convertBackupScheduleToLatestExcludeCopySettings(backupSchedule *admin20240530.DiskBackupSnapshotSchedule) *admin.DiskBackupSnapshotSchedule20240805 { + return &admin.DiskBackupSnapshotSchedule20240805{ + Policies: convertPoliciesToLatest(backupSchedule.Policies), + AutoExportEnabled: backupSchedule.AutoExportEnabled, + Export: convertAutoExportPolicyToLatest(backupSchedule.Export), + UseOrgAndGroupNamesInExportPrefix: 
backupSchedule.UseOrgAndGroupNamesInExportPrefix, + ReferenceHourOfDay: backupSchedule.ReferenceHourOfDay, + ReferenceMinuteOfHour: backupSchedule.ReferenceMinuteOfHour, + RestoreWindowDays: backupSchedule.RestoreWindowDays, + UpdateSnapshots: backupSchedule.UpdateSnapshots, + } +} diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index 2e9871c0d8..f4720df14c 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -9,24 +9,32 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( - Hourly = "hourly" - Daily = "daily" - Weekly = "weekly" - Monthly = "monthly" - Yearly = "yearly" - errorSnapshotBackupScheduleCreate = "error creating a Cloud Backup Schedule: %s" - errorSnapshotBackupScheduleUpdate = "error updating a Cloud Backup Schedule: %s" - errorSnapshotBackupScheduleRead = "error getting a Cloud Backup Schedule for the cluster(%s): %s" - errorSnapshotBackupScheduleSetting = "error setting `%s` for Cloud Backup Schedule(%s): %s" + Hourly = "hourly" + Daily = "daily" + Weekly = "weekly" + Monthly = "monthly" + Yearly = "yearly" + errorSnapshotBackupScheduleCreate = "error creating a Cloud Backup Schedule: %s" + errorSnapshotBackupScheduleUpdate = "error updating a Cloud Backup Schedule: %s" + errorSnapshotBackupScheduleRead = "error getting a Cloud Backup Schedule for the cluster(%s): %s" + ErrorOperationNotPermitted = "error operation not permitted" + AsymmetricShardsUnsupportedAction = "Ensure resource schema uses copy_settings.#.zone_id instead of copy_settings.#.replication_spec_id for asymmetric sharded clusters. 
Please refer to our examples, documentation, and 1.18.0 migration guide for more details at https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide.html.markdown" + errorSnapshotBackupScheduleSetting = "error setting `%s` for Cloud Backup Schedule(%s): %s" + DeprecationOldSchemaAction = "To learn more, see our examples, documentation, and 1.18.0 migration guide for more details at https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/guides/1.18.0-upgrade-guide.html.markdown" + AsymmetricShardsUnsupportedAPIError = "ASYMMETRIC_SHARD_BACKUP_UNSUPPORTED" ) +var DeprecationMsgOldSchema = fmt.Sprintf("%s %s", fmt.Sprintf(constant.DeprecationParamWithReplacement, "`copy_settings.#.zone_id`"), DeprecationOldSchemaAction) + func Resource() *schema.Resource { return &schema.Resource{ CreateContext: resourceCreate, @@ -85,6 +93,12 @@ func Resource() *schema.Resource { Computed: true, }, "replication_spec_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: DeprecationMsgOldSchema, + }, + "zone_id": { Type: schema.TypeString, Optional: true, Computed: true, @@ -308,6 +322,7 @@ func Resource() *schema.Resource { func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) @@ -315,7 +330,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. // MongoDB Atlas automatically generates a default backup policy for that cluster. // As a result, we need to first delete the default policies to avoid having // the infrastructure differs from the TF configuration file. - if _, _, err := connV220240530.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute(); err != nil { + if _, _, err := connV2.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute(); err != nil { diagWarning := diag.Diagnostic{ Severity: diag.Warning, Summary: "Error deleting default backup schedule", @@ -324,7 +339,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. diags = append(diags, diagWarning) } - if err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, d, projectID, clusterName); err != nil { + if err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, connV2, d, projectID, clusterName, true); err != nil { diags = append(diags, diag.Errorf(errorSnapshotBackupScheduleCreate, err)...) return diags } @@ -339,20 +354,63 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] + var backupSchedule *admin.DiskBackupSnapshotSchedule20240805 + var backupScheduleOldSDK *admin20240530.DiskBackupSnapshotSchedule + var copySettings []map[string]any + var resp *http.Response + var err error - backupPolicy, resp, err := connV220240530.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() + useOldAPI, err := shouldUseOldAPI(d, false) if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { - d.SetId("") - return nil - } return diag.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) } + if useOldAPI { + backupScheduleOldSDK, resp, err = connV220240530.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + return diag.Errorf("%s : %s : %s", errorSnapshotBackupScheduleRead, ErrorOperationNotPermitted, AsymmetricShardsUnsupportedAction) + } + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + return diag.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) + } + + copySettings = flattenCopySettingsOldSDK(backupScheduleOldSDK.GetCopySettings()) + backupSchedule = convertBackupScheduleToLatestExcludeCopySettings(backupScheduleOldSDK) + } else { + backupSchedule, resp, err = connV2.CloudBackupsApi.GetBackupSchedule(context.Background(), projectID, clusterName).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + return diag.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) + } + copySettings = FlattenCopySettings(backupSchedule.GetCopySettings()) + } + + diags := setSchemaFieldsExceptCopySettings(d, backupSchedule) + if diags.HasError() { + return diags + } + + if err := d.Set("copy_settings", copySettings); err != nil { + return diag.Errorf(errorSnapshotBackupScheduleSetting, "copy_settings", clusterName, err) + } + + return nil +} + +func setSchemaFieldsExceptCopySettings(d *schema.ResourceData, backupPolicy *admin.DiskBackupSnapshotSchedule20240805) diag.Diagnostics { + clusterName := backupPolicy.GetClusterName() if err := d.Set("cluster_id", backupPolicy.GetClusterId()); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "cluster_id", clusterName, err) } @@ -377,7 +435,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.Errorf(errorSnapshotBackupScheduleSetting, "id_policy", clusterName, err) } - if err := d.Set("export", flattenExport(backupPolicy)); err != nil { + if err := d.Set("export", FlattenExport(backupPolicy)); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "export", clusterName, err) } @@ -389,35 +447,31 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di return diag.Errorf(errorSnapshotBackupScheduleSetting, "use_org_and_group_names_in_export_prefix", clusterName, err) } - if err := d.Set("policy_item_hourly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Hourly)); err != nil { + if err := d.Set("policy_item_hourly", 
FlattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Hourly)); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_hourly", clusterName, err) } - if err := d.Set("policy_item_daily", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Daily)); err != nil { + if err := d.Set("policy_item_daily", FlattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Daily)); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_daily", clusterName, err) } - if err := d.Set("policy_item_weekly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Weekly)); err != nil { + if err := d.Set("policy_item_weekly", FlattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Weekly)); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_weekly", clusterName, err) } - if err := d.Set("policy_item_monthly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Monthly)); err != nil { + if err := d.Set("policy_item_monthly", FlattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Monthly)); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_monthly", clusterName, err) } - if err := d.Set("policy_item_yearly", flattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Yearly)); err != nil { + if err := d.Set("policy_item_yearly", FlattenPolicyItem(backupPolicy.GetPolicies()[0].GetPolicyItems(), Yearly)); err != nil { return diag.Errorf(errorSnapshotBackupScheduleSetting, "policy_item_yearly", clusterName, err) } - - if err := d.Set("copy_settings", flattenCopySettings(backupPolicy.GetCopySettings())); err != nil { - return diag.Errorf(errorSnapshotBackupScheduleSetting, "copy_settings", clusterName, err) - } - return nil } func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -429,7 +483,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } } - err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, d, projectID, clusterName) + err := cloudBackupScheduleCreateOrUpdate(ctx, connV220240530, connV2, d, projectID, clusterName, false) if err != nil { return diag.Errorf(errorSnapshotBackupScheduleUpdate, err) } @@ -438,12 +492,12 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] - _, _, err := connV220240530.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute() + _, _, err := connV2.CloudBackupsApi.DeleteAllBackupSchedules(ctx, projectID, clusterName).Execute() if err != nil { return diag.Errorf("error deleting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) } @@ -454,7 +508,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 + connV2 := meta.(*config.MongoDBClient).AtlasV2 parts := strings.SplitN(d.Id(), "-", 2) if len(parts) != 2 { @@ -464,7 +518,7 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s projectID := parts[0] clusterName := parts[1] - _, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + _, _, err := connV2.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() if err != nil { return nil, fmt.Errorf(errorSnapshotBackupScheduleRead, clusterName, err) } @@ -485,50 +539,32 @@ func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*s return []*schema.ResourceData{d}, nil } -func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admin20240530.APIClient, d *schema.ResourceData, projectID, clusterName string) error { - resp, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() - if err != nil { - return fmt.Errorf("error getting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) - } - - req := &admin20240530.DiskBackupSnapshotSchedule{} +func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admin20240530.APIClient, connV2 *admin.APIClient, d *schema.ResourceData, projectID, clusterName string, isCreate bool) error { + var err error copySettings := d.Get("copy_settings") - if copySettings != nil && (conversion.HasElementsSliceOrMap(copySettings) || d.HasChange("copy_settings")) { - req.CopySettings = expandCopySettings(copySettings.([]any)) + + useOldAPI, err := shouldUseOldAPI(d, isCreate) + if err != nil { + return err } - var policiesItem []admin20240530.DiskBackupApiPolicyItem + req := &admin.DiskBackupSnapshotSchedule20240805{} + var policiesItem []admin.DiskBackupApiPolicyItem if v, ok := d.GetOk("policy_item_hourly"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - policiesItem = append(policiesItem, expandPolicyItem(itemObj, Hourly)) + policiesItem = append(policiesItem, *ExpandPolicyItems(v.([]any), Hourly)...) } if v, ok := d.GetOk("policy_item_daily"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - policiesItem = append(policiesItem, expandPolicyItem(itemObj, Daily)) + policiesItem = append(policiesItem, *ExpandPolicyItems(v.([]any), Daily)...) } if v, ok := d.GetOk("policy_item_weekly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - policiesItem = append(policiesItem, expandPolicyItem(itemObj, Weekly)) - } + policiesItem = append(policiesItem, *ExpandPolicyItems(v.([]any), Weekly)...) } if v, ok := d.GetOk("policy_item_monthly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - policiesItem = append(policiesItem, expandPolicyItem(itemObj, Monthly)) - } + policiesItem = append(policiesItem, *ExpandPolicyItems(v.([]any), Monthly)...) } if v, ok := d.GetOk("policy_item_yearly"); ok { - items := v.([]any) - for _, s := range items { - itemObj := s.(map[string]any) - policiesItem = append(policiesItem, expandPolicyItem(itemObj, Yearly)) - } + policiesItem = append(policiesItem, *ExpandPolicyItems(v.([]any), Yearly)...) 
} if d.HasChange("auto_export_enabled") { @@ -536,30 +572,13 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admi } if v, ok := d.GetOk("export"); ok { - item := v.([]any) - itemObj := item[0].(map[string]any) - if autoExportEnabled := d.Get("auto_export_enabled"); autoExportEnabled != nil && autoExportEnabled.(bool) { - req.Export = &admin20240530.AutoExportPolicy{ - ExportBucketId: conversion.StringPtr(itemObj["export_bucket_id"].(string)), - FrequencyType: conversion.StringPtr(itemObj["frequency_type"].(string)), - } - } + req.Export = expandAutoExportPolicy(v.([]any), d) } if d.HasChange("use_org_and_group_names_in_export_prefix") { req.UseOrgAndGroupNamesInExportPrefix = conversion.Pointer(d.Get("use_org_and_group_names_in_export_prefix").(bool)) } - if len(policiesItem) > 0 { - policy := admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{ - PolicyItems: &policiesItem, - } - if len(resp.GetPolicies()) == 1 { - policy.Id = resp.GetPolicies()[0].Id - } - req.Policies = &[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{policy} - } - if v, ok := d.GetOkExists("reference_hour_of_day"); ok { req.ReferenceHourOfDay = conversion.Pointer(v.(int)) } @@ -575,7 +594,44 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admi req.UpdateSnapshots = value } - _, _, err = connV220240530.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, req).Execute() + if useOldAPI { + resp, _, err := connV220240530.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + if err != nil { + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + return fmt.Errorf("%s : %s", ErrorOperationNotPermitted, AsymmetricShardsUnsupportedAction) + } + return fmt.Errorf("error getting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) + } + var copySettingsOldSDK *[]admin20240530.DiskBackupCopySetting + if isCopySettingsNonEmptyOrChanged(d) { + copySettingsOldSDK = expandCopySettingsOldSDK(copySettings.([]any)) + } + + policiesOldSDK := getRequestPoliciesOldSDK(convertPolicyItemsToOldSDK(&policiesItem), resp.GetPolicies()) + + reqOld := convertBackupScheduleReqToOldSDK(req, copySettingsOldSDK, policiesOldSDK) + _, _, err = connV220240530.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, reqOld).Execute() + if err != nil { + if apiError, ok := admin20240530.AsError(err); ok && apiError.GetErrorCode() == AsymmetricShardsUnsupportedAPIError { + return fmt.Errorf("%s : %s", ErrorOperationNotPermitted, AsymmetricShardsUnsupportedAction) + } + return err + } + + return nil + } + + resp, _, err := connV2.CloudBackupsApi.GetBackupSchedule(ctx, projectID, clusterName).Execute() + if err != nil { + return fmt.Errorf("error getting MongoDB Cloud Backup Schedule (%s): %s", clusterName, err) + } + if isCopySettingsNonEmptyOrChanged(d) { + req.CopySettings = ExpandCopySettings(copySettings.([]any)) + } + + req.Policies = getRequestPolicies(policiesItem, resp.GetPolicies()) + + _, _, err = connV2.CloudBackupsApi.UpdateBackupSchedule(context.Background(), projectID, clusterName, req).Execute() if err != nil { return err } @@ -583,49 +639,51 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV220240530 *admi return nil } -func flattenPolicyItem(items []admin20240530.DiskBackupApiPolicyItem, frequencyType string) []map[string]any { - policyItems := make([]map[string]any, 0) - for _, v := range items { - if 
frequencyType == v.GetFrequencyType() { - policyItems = append(policyItems, map[string]any{ - "id": v.GetId(), - "frequency_interval": v.GetFrequencyInterval(), - "frequency_type": v.GetFrequencyType(), - "retention_unit": v.GetRetentionUnit(), - "retention_value": v.GetRetentionValue(), - }) - } +func ExpandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting20240805 { + if tfMap == nil { + return nil + } + + frequencies := conversion.ExpandStringList(tfMap["frequencies"].(*schema.Set).List()) + copySetting := &admin.DiskBackupCopySetting20240805{ + CloudProvider: conversion.Pointer(tfMap["cloud_provider"].(string)), + Frequencies: &frequencies, + RegionName: conversion.Pointer(tfMap["region_name"].(string)), + ZoneId: tfMap["zone_id"].(string), + ShouldCopyOplogs: conversion.Pointer(tfMap["should_copy_oplogs"].(bool)), } - return policyItems + return copySetting } -func flattenExport(roles *admin20240530.DiskBackupSnapshotSchedule) []map[string]any { - exportList := make([]map[string]any, 0) - emptyStruct := admin20240530.DiskBackupSnapshotSchedule{} - if emptyStruct.GetExport() != roles.GetExport() { - exportList = append(exportList, map[string]any{ - "frequency_type": roles.Export.GetFrequencyType(), - "export_bucket_id": roles.Export.GetExportBucketId(), - }) +func ExpandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting20240805 { + copySettings := make([]admin.DiskBackupCopySetting20240805, 0) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { + continue + } + apiObject := ExpandCopySetting(tfMap) + copySettings = append(copySettings, *apiObject) } - return exportList + return &copySettings } -func flattenCopySettings(copySettingList []admin20240530.DiskBackupCopySetting) []map[string]any { - copySettings := make([]map[string]any, 0) - for _, v := range copySettingList { - copySettings = append(copySettings, map[string]any{ - "cloud_provider": v.GetCloudProvider(), - "frequencies": v.GetFrequencies(), - "region_name": v.GetRegionName(), - "replication_spec_id": v.GetReplicationSpecId(), - "should_copy_oplogs": v.GetShouldCopyOplogs(), - }) - } - return copySettings +func expandCopySettingsOldSDK(tfList []any) *[]admin20240530.DiskBackupCopySetting { + copySettings := make([]admin20240530.DiskBackupCopySetting, 0) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { + continue + } + apiObject := expandCopySettingOldSDK(tfMap) + copySettings = append(copySettings, *apiObject) + } + return &copySettings } -func expandCopySetting(tfMap map[string]any) *admin20240530.DiskBackupCopySetting { +func expandCopySettingOldSDK(tfMap map[string]any) *admin20240530.DiskBackupCopySetting { if tfMap == nil { return nil } @@ -641,22 +699,30 @@ func expandCopySettin return copySetting } -func expandCopySettings(tfList []any) *[]admin20240530.DiskBackupCopySetting { - copySettings := make([]admin20240530.DiskBackupCopySetting, 0) +func expandAutoExportPolicy(items []any, d *schema.ResourceData) *admin.AutoExportPolicy { + itemObj := items[0].(map[string]any) - for _, tfMapRaw := range tfList { - tfMap, ok := tfMapRaw.(map[string]any) - if !ok { - continue + if autoExportEnabled := d.Get("auto_export_enabled"); autoExportEnabled != nil && autoExportEnabled.(bool) { + return &admin.AutoExportPolicy{ + ExportBucketId: conversion.StringPtr(itemObj["export_bucket_id"].(string)), + FrequencyType: conversion.StringPtr(itemObj["frequency_type"].(string)), } - apiObject 
:= expandCopySetting(tfMap) - copySettings = append(copySettings, *apiObject) } - return &copySettings + return nil } -func expandPolicyItem(itemObj map[string]any, frequencyType string) admin20240530.DiskBackupApiPolicyItem { - return admin20240530.DiskBackupApiPolicyItem{ +func ExpandPolicyItems(items []any, frequencyType string) *[]admin.DiskBackupApiPolicyItem { + results := make([]admin.DiskBackupApiPolicyItem, len(items)) + + for i, s := range items { + itemObj := s.(map[string]any) + results[i] = expandPolicyItem(itemObj, frequencyType) + } + return &results +} + +func expandPolicyItem(itemObj map[string]any, frequencyType string) admin.DiskBackupApiPolicyItem { + return admin.DiskBackupApiPolicyItem{ Id: policyItemID(itemObj), RetentionUnit: itemObj["retention_unit"].(string), RetentionValue: itemObj["retention_value"].(int), @@ -674,3 +740,79 @@ func policyItemID(policyState map[string]any) *string { } return nil } + +func shouldUseOldAPI(d *schema.ResourceData, isCreate bool) (bool, error) { + copySettings := d.Get("copy_settings") + if isCopySettingsNonEmptyOrChanged(d) { + return CheckCopySettingsToUseOldAPI(copySettings.([]any), isCreate) + } + return false, nil +} + +func isCopySettingsNonEmptyOrChanged(d *schema.ResourceData) bool { + copySettings := d.Get("copy_settings") + return copySettings != nil && (conversion.HasElementsSliceOrMap(copySettings) || d.HasChange("copy_settings")) +} + +// CheckCopySettingsToUseOldAPI verifies that all elements in tfList use either `replication_spec_id` or `zone_id` +// Returns an error if any element has both `replication_spec_id` and `zone_id` set during create +// and returns a bool indicating whether the old API should be used +func CheckCopySettingsToUseOldAPI(tfList []any, isCreate bool) (bool, error) { + allHaveRepID := true + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]any) + if !ok { + return false, fmt.Errorf("element is not a valid map[string]any") + } + + repSpecID, repOk := tfMap["replication_spec_id"].(string) + zoneID, zoneOk := tfMap["zone_id"].(string) + + if repOk && repSpecID != "" && zoneOk && zoneID != "" { + if isCreate { + return false, fmt.Errorf("both 'replication_spec_id' and 'zone_id' cannot be set") + } + return false, nil + } + + if (repOk && repSpecID != "" && zoneOk && zoneID != "") || (!repOk && !zoneOk) { + return false, fmt.Errorf("each element must have either 'replication_spec_id' or 'zone_id' set") + } + + if !repOk || repSpecID == "" { + allHaveRepID = false + } + } + + if allHaveRepID { + return true, nil + } + return false, nil +} + +func getRequestPoliciesOldSDK(policiesItem []admin20240530.DiskBackupApiPolicyItem, respPolicies []admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy { + if len(policiesItem) > 0 { + policy := admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{ + PolicyItems: &policiesItem, + } + if len(respPolicies) == 1 { + policy.Id = respPolicies[0].Id + } + return &[]admin20240530.AdvancedDiskBackupSnapshotSchedulePolicy{policy} + } + return nil +} + +func getRequestPolicies(policiesItem []admin.DiskBackupApiPolicyItem, respPolicies []admin.AdvancedDiskBackupSnapshotSchedulePolicy) *[]admin.AdvancedDiskBackupSnapshotSchedulePolicy { + if len(policiesItem) > 0 { + policy := admin.AdvancedDiskBackupSnapshotSchedulePolicy{ + PolicyItems: &policiesItem, + } + if len(respPolicies) == 1 { + policy.Id = respPolicies[0].Id + } + return &[]admin.AdvancedDiskBackupSnapshotSchedulePolicy{policy} + } + return nil 
+} diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 999f60f14c..8ce9343db3 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -31,7 +31,6 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -45,3 +44,86 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { }, }) } + +func TestMigBackupRSCloudBackupSchedule_copySettings(t *testing.T) { + mig.SkipIfVersionBelow(t, "1.16.0") // yearly policy item introduced in this version + var ( + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{ + CloudBackup: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_EAST_2"}, + }, + PitEnabled: true, // you cannot copy oplogs when pit is not enabled + }) + clusterName = clusterInfo.Name + terraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName + projectID = clusterInfo.ProjectID + copySettingsConfigWithRepSpecID = configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20240530.DiskBackupSnapshotSchedule{ + ReferenceHourOfDay: conversion.Pointer(3), + ReferenceMinuteOfHour: conversion.Pointer(45), + RestoreWindowDays: conversion.Pointer(1), + }) + copySettingsConfigWithZoneID = configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20240530.DiskBackupSnapshotSchedule{ + ReferenceHourOfDay: conversion.Pointer(3), + ReferenceMinuteOfHour: conversion.Pointer(45), + RestoreWindowDays: conversion.Pointer(1), + }) + checkMap = map[string]string{ + "cluster_name": clusterName, + "reference_hour_of_day": "3", + "reference_minute_of_hour": "45", + "restore_window_days": "1", + "policy_item_hourly.#": "1", + "policy_item_daily.#": "1", + "policy_item_weekly.#": "1", + "policy_item_monthly.#": "1", + "policy_item_yearly.#": "1", + "policy_item_hourly.0.frequency_interval": "1", + "policy_item_hourly.0.retention_unit": "days", + "policy_item_hourly.0.retention_value": "1", + "policy_item_daily.0.frequency_interval": "1", + "policy_item_daily.0.retention_unit": "days", + "policy_item_daily.0.retention_value": "2", + "policy_item_weekly.0.frequency_interval": "4", + "policy_item_weekly.0.retention_unit": "weeks", + "policy_item_weekly.0.retention_value": "3", + "policy_item_monthly.0.frequency_interval": "5", + "policy_item_monthly.0.retention_unit": "months", + "policy_item_monthly.0.retention_value": "4", + "policy_item_yearly.0.frequency_interval": "1", + "policy_item_yearly.0.retention_unit": "years", + "policy_item_yearly.0.retention_value": "1", + } + copySettingsChecks = map[string]string{ + "copy_settings.#": "1", + "copy_settings.0.cloud_provider": "AWS", + "copy_settings.0.region_name": "US_EAST_1", + "copy_settings.0.should_copy_oplogs": "true", + } + ) + + checksDefault := acc.AddAttrChecks(resourceName, 
[]resource.TestCheckFunc{checkExists(resourceName)}, checkMap) + checksCreate := acc.AddAttrChecks(resourceName, checksDefault, copySettingsChecks) + checksCreateWithReplicationSpecID := acc.AddAttrSetChecks(resourceName, checksCreate, "copy_settings.0.replication_spec_id") + checksUpdateWithZoneID := acc.AddAttrSetChecks(resourceName, checksCreate, "copy_settings.0.zone_id") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + CheckDestroy: checkDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: mig.ExternalProviders(), + Config: copySettingsConfigWithRepSpecID, + Check: resource.ComposeAggregateTestCheckFunc(checksCreateWithReplicationSpecID...), + }, + mig.TestStepCheckEmptyPlan(copySettingsConfigWithRepSpecID), + { + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Config: copySettingsConfigWithZoneID, + Check: resource.ComposeAggregateTestCheckFunc(checksUpdateWithZoneID...), + }, + mig.TestStepCheckEmptyPlan(copySettingsConfigWithZoneID), + }, + }) +} diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 7cd423ed22..a39c810386 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupschedule" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" ) @@ -250,7 +251,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { }) } -func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { +func TestAccBackupRSCloudBackupSchedule_copySettings_repSpecId(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{ CloudBackup: true, @@ -299,9 +300,17 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { "copy_settings.#": "0", } ) - checksDefault := acc.AddAttrChecks(resourceName, []resource.TestCheckFunc{checkExists(resourceName)}, checkMap) - checksCreate := acc.AddAttrChecks(resourceName, checksDefault, copySettingsChecks) - checksUpdate := acc.AddAttrChecks(resourceName, checksDefault, emptyCopySettingsChecks) + checksDefaultRS := acc.AddAttrChecks(resourceName, []resource.TestCheckFunc{checkExists(resourceName)}, checkMap) + checksCreateRS := acc.AddAttrChecks(resourceName, checksDefaultRS, copySettingsChecks) + checksCreateAll := acc.AddAttrSetChecks(resourceName, checksCreateRS, "copy_settings.0.replication_spec_id") + + checksDefaultDS := acc.AddAttrChecks(dataSourceName, []resource.TestCheckFunc{}, checkMap) + checksCreateDS := acc.AddAttrChecks(dataSourceName, checksDefaultDS, copySettingsChecks) + checksCreateDSAll := acc.AddAttrSetChecks(dataSourceName, checksCreateDS, "copy_settings.0.replication_spec_id") + + checksCreateAll = append(checksCreateAll, checksCreateDSAll...) 
+ + checksUpdate := acc.AddAttrChecks(resourceName, checksDefaultRS, emptyCopySettingsChecks) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, @@ -309,15 +318,15 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), }), - Check: resource.ComposeAggregateTestCheckFunc(checksCreate...), + Check: resource.ComposeAggregateTestCheckFunc(checksCreateAll...), }, { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, true, &admin20240530.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -327,6 +336,93 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { }, }) } + +func TestAccBackupRSCloudBackupSchedule_copySettings_zoneId(t *testing.T) { + var ( + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{ + CloudBackup: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_EAST_2"}, + }, + PitEnabled: true, // you cannot copy oplogs when pit is not enabled + }) + clusterName = clusterInfo.Name + terraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName + projectID = clusterInfo.ProjectID + checkMap = map[string]string{ + "cluster_name": clusterName, + "reference_hour_of_day": "3", + "reference_minute_of_hour": "45", + "restore_window_days": "1", + "policy_item_hourly.#": "1", + "policy_item_daily.#": "1", + "policy_item_weekly.#": "1", + "policy_item_monthly.#": "1", + "policy_item_yearly.#": "1", + "policy_item_hourly.0.frequency_interval": "1", + "policy_item_hourly.0.retention_unit": "days", + "policy_item_hourly.0.retention_value": "1", + "policy_item_daily.0.frequency_interval": "1", + "policy_item_daily.0.retention_unit": "days", + "policy_item_daily.0.retention_value": "2", + "policy_item_weekly.0.frequency_interval": "4", + "policy_item_weekly.0.retention_unit": "weeks", + "policy_item_weekly.0.retention_value": "3", + "policy_item_monthly.0.frequency_interval": "5", + "policy_item_monthly.0.retention_unit": "months", + "policy_item_monthly.0.retention_value": "4", + "policy_item_yearly.0.frequency_interval": "1", + "policy_item_yearly.0.retention_unit": "years", + "policy_item_yearly.0.retention_value": "1", + } + copySettingsChecks = map[string]string{ + "copy_settings.#": "1", + "copy_settings.0.cloud_provider": "AWS", + "copy_settings.0.region_name": "US_EAST_1", + "copy_settings.0.should_copy_oplogs": "true", + } + emptyCopySettingsChecks = map[string]string{ + "copy_settings.#": "0", + } + ) + checksDefaultRS := acc.AddAttrChecks(resourceName, []resource.TestCheckFunc{checkExists(resourceName)}, checkMap) + checksCreateRS := acc.AddAttrChecks(resourceName, checksDefaultRS, copySettingsChecks) + checksCreateAll := acc.AddAttrSetChecks(resourceName, checksCreateRS, "copy_settings.0.zone_id") + + checksDefaultDS := acc.AddAttrChecks(dataSourceName, 
[]resource.TestCheckFunc{}, checkMap) + checksCreateDS := acc.AddAttrChecks(dataSourceName, checksDefaultDS, copySettingsChecks) + checksCreateDSAll := acc.AddAttrSetChecks(dataSourceName, checksCreateDS, "copy_settings.0.zone_id") + + checksCreateAll = append(checksCreateAll, checksCreateDSAll...) + + checksUpdate := acc.AddAttrChecks(resourceName, checksDefaultRS, emptyCopySettingsChecks) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: checkDestroy, + Steps: []resource.TestStep{ + { + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20240530.DiskBackupSnapshotSchedule{ + ReferenceHourOfDay: conversion.Pointer(3), + ReferenceMinuteOfHour: conversion.Pointer(45), + RestoreWindowDays: conversion.Pointer(1), + }), + Check: resource.ComposeAggregateTestCheckFunc(checksCreateAll...), + }, + { + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, false, &admin20240530.DiskBackupSnapshotSchedule{ + ReferenceHourOfDay: conversion.Pointer(3), + ReferenceMinuteOfHour: conversion.Pointer(45), + RestoreWindowDays: conversion.Pointer(1), + }), + Check: resource.ComposeAggregateTestCheckFunc(checksUpdate...), + }, + }, + }) +} + func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true}) @@ -429,6 +525,82 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { }) } +func TestCheckCopySettingsToUseOldAPI(t *testing.T) { + testCases := []struct { + name string + errMsg string + tfList []any + isCreate bool + expectedShouldUseOldAPI bool + expectErr bool + }{ + { + name: "Valid - all replication_spec_id set", + tfList: []any{ + map[string]any{"replication_spec_id": "123"}, + map[string]any{"replication_spec_id": "456"}, + }, + isCreate: true, + expectedShouldUseOldAPI: true, + expectErr: false, + }, + { + name: "Valid - all zone_id set", + tfList: []any{ + map[string]any{"zone_id": "123"}, + map[string]any{"zone_id": "456"}, + }, + isCreate: true, + expectedShouldUseOldAPI: false, + expectErr: false, + }, + { + name: "Invalid - both IDs set on Create", + tfList: []any{ + map[string]any{"replication_spec_id": "123", "zone_id": "zone123"}, + }, + isCreate: true, + expectedShouldUseOldAPI: false, + expectErr: true, + errMsg: "both 'replication_spec_id' and 'zone_id' cannot be set", + }, + { + name: "Valid - Both IDs set on Update/Read", + tfList: []any{ + map[string]any{"replication_spec_id": "123", "zone_id": "zone123"}, + }, + isCreate: false, + expectedShouldUseOldAPI: false, + expectErr: false, + }, + { + name: "Invalid - neither ID set", + tfList: []any{ + map[string]any{}, + }, + isCreate: false, + expectedShouldUseOldAPI: false, + expectErr: true, + errMsg: "each element must have either 'replication_spec_id' or 'zone_id' set", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := cloudbackupschedule.CheckCopySettingsToUseOldAPI(tc.tfList, tc.isCreate) + if result != tc.expectedShouldUseOldAPI { + t.Errorf("%s failed: expected result %v, got %v", tc.name, tc.expectedShouldUseOldAPI, result) + } + if (err != nil) != tc.expectErr { + t.Errorf("%s failed: expected error %v, got %v", tc.name, tc.expectErr, err) + } + if err != nil && err.Error() != tc.errMsg { + t.Errorf("%s failed: expected error message %q, got %q", tc.name, tc.errMsg, err.Error()) + } + }) + } +} + func 
checkExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] @@ -534,10 +706,13 @@ func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSch `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings bool, p *admin20240530.DiskBackupSnapshotSchedule) string { +func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings, useRepSpecID bool, p *admin20240530.DiskBackupSnapshotSchedule) string { var copySettings string + var dataSourceConfig string + if !emptyCopySettings { - copySettings = fmt.Sprintf(` + if useRepSpecID { + copySettings = fmt.Sprintf(` copy_settings { cloud_provider = "AWS" frequencies = ["HOURLY", @@ -550,6 +725,32 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp replication_spec_id = %[1]s.replication_specs.*.id[0] should_copy_oplogs = true }`, clusterResourceName) + + dataSourceConfig = `data "mongodbatlas_cloud_backup_schedule" "schedule_test" { + cluster_name = mongodbatlas_cloud_backup_schedule.schedule_test.cluster_name + project_id = mongodbatlas_cloud_backup_schedule.schedule_test.project_id + }` + } else { + copySettings = fmt.Sprintf(` + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY", + "YEARLY", + "ON_DEMAND"] + region_name = "US_EAST_1" + zone_id = %[1]s.replication_specs.*.zone_id[0] + should_copy_oplogs = true + }`, clusterResourceName) + + dataSourceConfig = `data "mongodbatlas_cloud_backup_schedule" "schedule_test" { + cluster_name = mongodbatlas_cloud_backup_schedule.schedule_test.cluster_name + project_id = mongodbatlas_cloud_backup_schedule.schedule_test.project_id + use_zone_id_for_copy_settings = true + }` + } } return fmt.Sprintf(` %[1]s @@ -588,7 +789,9 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp } %[7]s } - `, terraformStr, projectID, clusterResourceName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings) + + %[8]s + `, terraformStr, projectID, clusterResourceName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings, dataSourceConfig) } func configOnePolicy(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { diff --git a/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go b/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go index bf5283b9b2..ad0fd6f71c 100644 --- a/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go +++ b/internal/service/cloudbackupsnapshot/data_source_cloud_backup_snapshots.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go index 2f852f50de..8bcae4e545 100644 --- a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go +++ 
b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot.go @@ -4,7 +4,7 @@ import ( "errors" "regexp" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func SplitSnapshotImportID(id string) (*admin.GetReplicaSetBackupApiParams, error) { diff --git a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go index 8e2df8d6af..32bad1804a 100644 --- a/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/model_cloud_backup_snapshot_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cloudbackupsnapshot" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestSplitSnapshotImportID(t *testing.T) { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go index 172f1ad22c..e8bb360817 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go @@ -14,7 +14,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cluster" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go index 993eebd793..a6cb9dec02 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go @@ -109,12 +109,14 @@ func checkExists(resourceName string) resource.TestCheckFunc { if ids["snapshot_id"] == "" { return fmt.Errorf("no ID is set") } - _, _, err := acc.ConnV2().CloudBackupsApi.GetReplicaSetBackup(context.Background(), ids["project_id"], ids["cluster_name"], ids["snapshot_id"]).Execute() - if err == nil { - return nil + if _, _, err := acc.ConnV2().CloudBackupsApi.GetReplicaSetBackup(context.Background(), ids["project_id"], ids["cluster_name"], ids["snapshot_id"]).Execute(); err != nil { + return fmt.Errorf("cloudBackupSnapshot (%s) does not exist", rs.Primary.Attributes["snapshot_id"]) } - - return fmt.Errorf("cloudBackupSnapshot (%s) does not exist", rs.Primary.Attributes["snapshot_id"]) + // needed as first call to cluster with new API will fail due to transition to ISS feature flag + if err := acc.CheckClusterExistsHandlingRetry(ids["project_id"], ids["cluster_name"]); err != nil { + return fmt.Errorf("cluster (%s : %s) does not exist", ids["project_id"], ids["cluster_name"]) + } + return nil } } diff --git a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go index 17cc0a46e1..568e4d899f 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_bucket.go @@ -6,7 +6,6 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -15,10 +14,8 @@ func DataSource() *schema.Resource { ReadContext: datasourceRead, Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0") + " Will not be an input parameter, only computed.", + Type: schema.TypeString, + Computed: true, }, "export_bucket_id": { Type: schema.TypeString, diff --git a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go index 7b6b5b19f3..c8d5354ad2 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go +++ b/internal/service/cloudbackupsnapshotexportbucket/data_source_cloud_backup_snapshot_export_buckets.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go index cfb2fc4f74..e53fe51012 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go index 4647c3a975..8354065915 100644 --- a/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go +++ b/internal/service/cloudbackupsnapshotexportbucket/resource_cloud_backup_snapshot_export_bucket_test.go @@ -253,7 +253,6 @@ func configAWSBasic(projectID, bucketName, policyName, roleName string) string { data "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { project_id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.project_id export_bucket_id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id - id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id } data "mongodbatlas_cloud_backup_snapshot_export_buckets" "test" { @@ -298,7 +297,6 @@ func configAzureBasic(projectID, atlasAzureAppID, servicePrincipalID, tenantID, data "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { project_id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.project_id export_bucket_id = 
mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id - id = mongodbatlas_cloud_backup_snapshot_export_bucket.test.export_bucket_id } data "mongodbatlas_cloud_backup_snapshot_export_buckets" "test" { diff --git a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go index 66c6666965..6418155b6d 100644 --- a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go +++ b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_job.go @@ -14,10 +14,8 @@ func DataSource() *schema.Resource { ReadContext: dataSourceMongoDBAtlasCloudBackupSnapshotsExportJobRead, Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0") + " Will not be an input parameter, only computed.", + Type: schema.TypeString, + Computed: true, }, "export_job_id": { Type: schema.TypeString, diff --git a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go index a29f13d3a6..2f915200ee 100644 --- a/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go +++ b/internal/service/cloudbackupsnapshotexportjob/data_source_cloud_backup_snapshot_export_jobs.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go index 8fe4a0d7a3..063909512b 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_job.go b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_job.go index 9c0e2643e0..823264886b 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_job.go +++ b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_job.go @@ -6,8 +6,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -17,7 +15,7 @@ func DataSource() *schema.Resource { Schema: map[string]*schema.Schema{ "snapshot_restore_job_id": { Type: schema.TypeString, - Optional: true, + Required: true, }, "project_id": { Type: schema.TypeString, @@ -29,22 +27,10 @@ func DataSource() *schema.Resource { Required: true, ForceNew: true, }, - "job_id": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - // When deprecating, change snapshot_restore_job_id to Required: true and implementation below - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0") + " Use snapshot_restore_job_id instead.", - }, "cancelled": { Type: schema.TypeBool, Computed: true, }, - "created_at": { - Type: schema.TypeString, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, "delivery_type": { Type: schema.TypeString, Computed: true, @@ -103,17 +89,7 @@ func DataSource() *schema.Resource { func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).AtlasV2 - var restoreID string - restoreIDRaw, restoreIDInField := d.GetOk("snapshot_restore_job_id") - if restoreIDInField { - restoreID = restoreIDRaw.(string) - } else { - idEncoded, restoreIDInField := d.GetOk("job_id") - if !restoreIDInField { - return diag.Errorf("either snapshot_restore_job_id or job_id must be set") - } - restoreID = conversion.GetEncodedID(idEncoded.(string), "snapshot_restore_job_id") - } + restoreID := d.Get("snapshot_restore_job_id").(string) projectID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) diff --git a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go index 61a80a6808..3cc07c2eba 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go +++ b/internal/service/cloudbackupsnapshotrestorejob/data_source_cloud_backup_snapshot_restore_jobs.go @@ -7,10 +7,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { @@ -46,11 +45,6 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "created_at": { - Type: schema.TypeString, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, "delivery_type": { Type: schema.TypeString, Computed: true, diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go index 2bb1ffc6a6..6700299b70 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job.go @@ -10,10 +10,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { @@ -101,11 +100,6 @@ func Resource() *schema.Resource { Type: schema.TypeBool, Computed: true, }, - "created_at": { - Type: schema.TypeString, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, "expired": { Type: schema.TypeBool, Computed: true, diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index 3f27e3a900..c9e6b97f8f 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -177,7 +177,6 @@ func configBasic(terraformStr, clusterResourceName, description, retentionInDays data "mongodbatlas_cloud_backup_snapshot_restore_job" "test" { project_id = mongodbatlas_cloud_backup_snapshot.test.project_id cluster_name = mongodbatlas_cloud_backup_snapshot.test.cluster_name - job_id = mongodbatlas_cloud_backup_snapshot_restore_job.test.id # remove after 1.18.0 snapshot_restore_job_id = mongodbatlas_cloud_backup_snapshot_restore_job.test.snapshot_restore_job_id } diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go index 0a0a568687..f1164f6170 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go @@ -12,7 +12,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) /* diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go index dd35fc02ec..a593195062 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_setup.go @@ -6,7 +6,7 @@ import ( "net/http" "regexp" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/cluster/data_source_cluster.go b/internal/service/cluster/data_source_cluster.go index 82026a53aa..32b995875c 100644 --- a/internal/service/cluster/data_source_cluster.go +++ b/internal/service/cluster/data_source_cluster.go @@ -289,7 +289,7 @@ func DataSource() *schema.Resource { "labels": { Type: schema.TypeSet, Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByDateWithReplacement, "September 2024", "tags"), + Deprecated: 
fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { diff --git a/internal/service/cluster/data_source_clusters.go b/internal/service/cluster/data_source_clusters.go index ac63ed3e27..c5e8ac10da 100644 --- a/internal/service/cluster/data_source_clusters.go +++ b/internal/service/cluster/data_source_clusters.go @@ -292,7 +292,7 @@ func PluralDataSource() *schema.Resource { "labels": { Type: schema.TypeSet, Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByDateWithReplacement, "September 2024", "tags"), + Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { diff --git a/internal/service/cluster/resource_cluster.go b/internal/service/cluster/resource_cluster.go index 01a63dc42f..41f38f4da2 100644 --- a/internal/service/cluster/resource_cluster.go +++ b/internal/service/cluster/resource_cluster.go @@ -314,7 +314,7 @@ func Resource() *schema.Resource { Type: schema.TypeSet, Optional: true, Set: advancedcluster.HashFunctionForKeyValuePair, - Deprecated: fmt.Sprintf(constant.DeprecationParamByDateWithReplacement, "September 2024", "tags"), + Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go index d9271a7baa..6a943eab52 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/controlplaneipaddresses/model.go b/internal/service/controlplaneipaddresses/model.go index e70ec902c0..a50fee8946 100644 --- a/internal/service/controlplaneipaddresses/model.go +++ b/internal/service/controlplaneipaddresses/model.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewTFControlPlaneIPAddresses(ctx context.Context, apiResp *admin.ControlPlaneIPAddresses) (*TFControlPlaneIpAddressesModel, diag.Diagnostics) { diff --git a/internal/service/controlplaneipaddresses/model_test.go b/internal/service/controlplaneipaddresses/model_test.go index 7a4e2f48ea..5c627b5120 100644 --- a/internal/service/controlplaneipaddresses/model_test.go +++ b/internal/service/controlplaneipaddresses/model_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/controlplaneipaddresses" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) type sdkToTFModelTestCase struct { diff --git a/internal/service/customdbrole/data_source_custom_db_roles.go 
b/internal/service/customdbrole/data_source_custom_db_roles.go index 3f7492bbc7..dfe78c2b79 100644 --- a/internal/service/customdbrole/data_source_custom_db_roles.go +++ b/internal/service/customdbrole/data_source_custom_db_roles.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/customdbrole/resource_custom_db_role.go b/internal/service/customdbrole/resource_custom_db_role.go index 4043f34be5..239ecd3fde 100644 --- a/internal/service/customdbrole/resource_custom_db_role.go +++ b/internal/service/customdbrole/resource_custom_db_role.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/customdbrole/resource_custom_db_role_test.go b/internal/service/customdbrole/resource_custom_db_role_test.go index 8e9360f71f..02809cfba5 100644 --- a/internal/service/customdbrole/resource_custom_db_role_test.go +++ b/internal/service/customdbrole/resource_custom_db_role_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const resourceName = "mongodbatlas_custom_db_role.test" diff --git a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go index 8fea87b8d2..488fa9bef5 100644 --- a/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go +++ b/internal/service/customdnsconfigurationclusteraws/resource_custom_dns_configuration_cluster_aws.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/databaseuser/model_database_user.go b/internal/service/databaseuser/model_database_user.go index a27b018149..90c31d0bd9 100644 --- a/internal/service/databaseuser/model_database_user.go +++ b/internal/service/databaseuser/model_database_user.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewMongoDBDatabaseUser(ctx context.Context, statePasswordValue types.String, dbUserModel *TfDatabaseUserModel) (*admin.CloudDatabaseUser, diag.Diagnostics) { diff --git a/internal/service/databaseuser/model_database_user_test.go b/internal/service/databaseuser/model_database_user_test.go index c829481f22..0b0d5602a0 100644 --- 
a/internal/service/databaseuser/model_database_user_test.go +++ b/internal/service/databaseuser/model_database_user_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/databaseuser" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var ( diff --git a/internal/service/databaseuser/resource_database_user_migration_test.go b/internal/service/databaseuser/resource_database_user_migration_test.go index 6d37e4c860..3716139021 100644 --- a/internal/service/databaseuser/resource_database_user_migration_test.go +++ b/internal/service/databaseuser/resource_database_user_migration_test.go @@ -3,7 +3,7 @@ package databaseuser_test import ( "testing" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" diff --git a/internal/service/databaseuser/resource_database_user_test.go b/internal/service/databaseuser/resource_database_user_test.go index 1e614f5ec4..8281fe67fd 100644 --- a/internal/service/databaseuser/resource_database_user_test.go +++ b/internal/service/databaseuser/resource_database_user_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/databaseuser" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go index 25bdf48651..04a5da68a6 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_run.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const errorDataLakePipelineRunRead = "error reading MongoDB Atlas DataLake Run (%s): %s" diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go index c11ba3ae90..9c357fec0f 100644 --- a/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipeline_runs.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const errorDataLakePipelineRunList = "error reading MongoDB Atlas DataLake Runs (%s): %s" diff --git a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go index fb4dfffbe9..1846c35428 100644 --- 
a/internal/service/datalakepipeline/data_source_data_lake_pipelines.go +++ b/internal/service/datalakepipeline/data_source_data_lake_pipelines.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const errorDataLakePipelineList = "error creating MongoDB Atlas DataLake Pipelines: %s" diff --git a/internal/service/datalakepipeline/resource_data_lake_pipeline.go b/internal/service/datalakepipeline/resource_data_lake_pipeline.go index dcb97f9268..0420997a30 100644 --- a/internal/service/datalakepipeline/resource_data_lake_pipeline.go +++ b/internal/service/datalakepipeline/resource_data_lake_pipeline.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/encryptionatrest/model_encryption_at_rest.go b/internal/service/encryptionatrest/model_encryption_at_rest.go index 0e40129e11..3e1eed7375 100644 --- a/internal/service/encryptionatrest/model_encryption_at_rest.go +++ b/internal/service/encryptionatrest/model_encryption_at_rest.go @@ -5,7 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewTfEncryptionAtRestRSModel(ctx context.Context, projectID string, encryptionResp *admin.EncryptionAtRest) *TfEncryptionAtRestRSModel { diff --git a/internal/service/encryptionatrest/model_encryption_at_rest_test.go b/internal/service/encryptionatrest/model_encryption_at_rest_test.go index ea426bc1a8..9786cb0fa4 100644 --- a/internal/service/encryptionatrest/model_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/model_encryption_at_rest_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/encryptionatrest" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var ( diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest.go b/internal/service/encryptionatrest/resource_encryption_at_rest.go index 010cc03f3a..fc17c1085a 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest.go @@ -24,7 +24,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/project" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go index 0c5f638c7a..cf9ed9d228 100644 --- 
a/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_migration_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestMigEncryptionAtRest_basicAWS(t *testing.T) { diff --git a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go index 0b9980e92c..91786ca273 100644 --- a/internal/service/encryptionatrest/resource_encryption_at_rest_test.go +++ b/internal/service/encryptionatrest/resource_encryption_at_rest_test.go @@ -16,8 +16,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.mongodb.org/atlas-sdk/v20240805001/admin" - "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "go.mongodb.org/atlas-sdk/v20240805003/mockadmin" ) const ( diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go index 1f91f587f2..70c22fd762 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instance_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestAccFederatedDatabaseInstanceDS_s3Bucket(t *testing.T) { diff --git a/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go b/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go index aa29744694..78f37148ca 100644 --- a/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go +++ b/internal/service/federateddatabaseinstance/data_source_federated_database_instances.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance.go index 647b7629a8..9dde38f38f 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance.go @@ -7,7 +7,7 @@ import ( "net/http" "strings" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/federatedquerylimit/data_source_federated_query_limits.go 
b/internal/service/federatedquerylimit/data_source_federated_query_limits.go index c270ed8c99..7886c003ae 100644 --- a/internal/service/federatedquerylimit/data_source_federated_query_limits.go +++ b/internal/service/federatedquerylimit/data_source_federated_query_limits.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedquerylimit/resource_federated_query_limit.go b/internal/service/federatedquerylimit/resource_federated_query_limit.go index 9e8c744a26..02f2c778c9 100644 --- a/internal/service/federatedquerylimit/resource_federated_query_limit.go +++ b/internal/service/federatedquerylimit/resource_federated_query_limit.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go index 67eaee4feb..d29e30d07d 100644 --- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go +++ b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers.go @@ -5,12 +5,11 @@ import ( "errors" "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -37,16 +36,6 @@ func PluralDataSource() *schema.Resource { }, Optional: true, }, - "page_num": { - Type: schema.TypeInt, - Optional: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, - "items_per_page": { - Type: schema.TypeInt, - Optional: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, "results": { Type: schema.TypeList, Computed: true, @@ -290,6 +279,7 @@ func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) IdpType: &idpTypes, } + // iterating all results to be implemented as part of CLOUDP-227485 providers, _, err := connV2.FederatedAuthenticationApi.ListIdentityProvidersWithParams(ctx, params).Execute() if err != nil { return diag.Errorf("error getting federatedSettings Identity Providers assigned (%s): %s", federationSettingsID, err) diff --git a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go index 65fdfa7a5c..4e68eae3aa 100644 --- a/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go +++ 
b/internal/service/federatedsettingsidentityprovider/data_source_federated_settings_identity_providers_test.go @@ -78,8 +78,6 @@ func configPluralDS(federatedSettingsID string, idpType *string, protocols []str return fmt.Sprintf(` data "mongodbatlas_federated_settings_identity_providers" "test" { federation_settings_id = "%[1]s" - page_num = 1 - items_per_page = 100 %[2]s %[3]s } diff --git a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go index a307e73983..9fe7309679 100644 --- a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go +++ b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider.go @@ -4,7 +4,7 @@ import ( "sort" "strings" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" diff --git a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go index a1505b9d89..c6d031a19b 100644 --- a/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go +++ b/internal/service/federatedsettingsidentityprovider/model_federated_settings_identity_provider_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/stretchr/testify/assert" diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go index e930171af6..9bb62d1314 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func DataSourceSettings() *schema.Resource { diff --git a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go index 0aca97e00f..a07b937981 100644 --- a/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go +++ b/internal/service/federatedsettingsorgconfig/data_source_federated_settings_connected_orgs.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go b/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go index d9a8ab937d..c52b653185 100644 --- 
a/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go +++ b/internal/service/federatedsettingsorgconfig/model_federated_settings_connected_orgs.go @@ -4,7 +4,7 @@ import ( "sort" "strings" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) type roleMappingsByGroupName []admin.AuthFederationRoleMapping diff --git a/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go b/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go index ae8241e996..bd383db6fc 100644 --- a/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go +++ b/internal/service/federatedsettingsorgrolemapping/data_source_federated_settings_org_role_mappings.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go b/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go index 5a0208f843..3a5f2bffc6 100644 --- a/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go +++ b/internal/service/federatedsettingsorgrolemapping/model_federated_settings_org_role_mapping.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) type mRoleAssignment []admin.RoleAssignment diff --git a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go index fb5512dd1d..1f0282aa08 100644 --- a/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go +++ b/internal/service/federatedsettingsorgrolemapping/resource_federated_settings_org_role_mapping.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration.go b/internal/service/ldapconfiguration/resource_ldap_configuration.go index 9182281009..191f89e92e 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/ldapverify/resource_ldap_verify.go b/internal/service/ldapverify/resource_ldap_verify.go 
index e199c63e97..3e485ab487 100644 --- a/internal/service/ldapverify/resource_ldap_verify.go +++ b/internal/service/ldapverify/resource_ldap_verify.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/maintenancewindow/resource_maintenance_window.go b/internal/service/maintenancewindow/resource_maintenance_window.go index ca60b6cce1..7e0ba44162 100644 --- a/internal/service/maintenancewindow/resource_maintenance_window.go +++ b/internal/service/maintenancewindow/resource_maintenance_window.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/networkcontainer/data_source_network_containers.go b/internal/service/networkcontainer/data_source_network_containers.go index ad5218c2cf..ece872b189 100644 --- a/internal/service/networkcontainer/data_source_network_containers.go +++ b/internal/service/networkcontainer/data_source_network_containers.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/networkcontainer/resource_network_container.go b/internal/service/networkcontainer/resource_network_container.go index e404ff7df1..b597e8fb77 100644 --- a/internal/service/networkcontainer/resource_network_container.go +++ b/internal/service/networkcontainer/resource_network_container.go @@ -17,7 +17,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/networkpeering/data_source_network_peering.go b/internal/service/networkpeering/data_source_network_peering.go index f596831578..b8dfc33259 100644 --- a/internal/service/networkpeering/data_source_network_peering.go +++ b/internal/service/networkpeering/data_source_network_peering.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func DataSource() *schema.Resource { diff --git a/internal/service/networkpeering/data_source_network_peerings.go b/internal/service/networkpeering/data_source_network_peerings.go index 5412234217..97ef2598e6 100644 --- a/internal/service/networkpeering/data_source_network_peerings.go +++ b/internal/service/networkpeering/data_source_network_peerings.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/networkpeering/resource_network_peering.go b/internal/service/networkpeering/resource_network_peering.go index 23efb04908..42b417bc5a 100644 --- a/internal/service/networkpeering/resource_network_peering.go +++ b/internal/service/networkpeering/resource_network_peering.go @@ -16,7 +16,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/networkcontainer" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/onlinearchive/resource_online_archive.go b/internal/service/onlinearchive/resource_online_archive.go index d93371f089..96bdded486 100644 --- a/internal/service/onlinearchive/resource_online_archive.go +++ b/internal/service/onlinearchive/resource_online_archive.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/organization/data_source_organizations.go b/internal/service/organization/data_source_organizations.go index 484dab350a..4eda3355cb 100644 --- a/internal/service/organization/data_source_organizations.go +++ b/internal/service/organization/data_source_organizations.go @@ -5,7 +5,7 @@ import ( "fmt" "log" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/organization/resource_organization.go b/internal/service/organization/resource_organization.go index dbeaa71c81..b8d0ae473c 100644 --- a/internal/service/organization/resource_organization.go +++ b/internal/service/organization/resource_organization.go @@ -6,7 +6,7 @@ import ( "log" "net/http" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index 7b65af7ec8..3103cc900e 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -7,7 +7,7 @@ import ( "regexp" "testing" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" diff --git a/internal/service/orginvitation/resource_org_invitation.go b/internal/service/orginvitation/resource_org_invitation.go index bcdc8c7c16..fc7269e84d 100644 --- a/internal/service/orginvitation/resource_org_invitation.go +++ b/internal/service/orginvitation/resource_org_invitation.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go index afb105fdab..622410d3aa 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) type permCtxKey string @@ -89,7 +89,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).AtlasV2 - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Id() enabled := d.Get("enabled").(bool) @@ -115,7 +114,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. stateConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, conn.ClustersApi), Timeout: d.Timeout(timeoutKey.(string)), MinTimeout: 5 * time.Second, Delay: 3 * time.Second, diff --git a/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go b/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go index 261638f6a8..196b97a219 100644 --- a/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go +++ b/internal/service/privatelinkendpoint/resource_privatelink_endpoint.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go index cf58e60062..c5a3e106d3 100644 --- a/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go +++ b/internal/service/privatelinkendpointserverless/resource_privatelink_endpoint_serverless.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/privatelinkendpointservice/data_source_privatelink_endpoint_service.go b/internal/service/privatelinkendpointservice/data_source_privatelink_endpoint_service.go index ed7ce5255f..26e37dba7c 100644 --- 
a/internal/service/privatelinkendpointservice/data_source_privatelink_endpoint_service.go +++ b/internal/service/privatelinkendpointservice/data_source_privatelink_endpoint_service.go @@ -85,10 +85,6 @@ func DataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "service_attachment_name": { - Type: schema.TypeString, - Computed: true, - }, }, }, }, diff --git a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go index 47450f69f5..b2ab84162b 100644 --- a/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go +++ b/internal/service/privatelinkendpointservice/resource_privatelink_endpoint_service.go @@ -13,11 +13,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( @@ -120,11 +119,6 @@ func Resource() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "service_attachment_name": { - Type: schema.TypeString, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, }, }, }, @@ -142,7 +136,6 @@ func Resource() *schema.Resource { func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 projectID := d.Get("project_id").(string) privateLinkID := conversion.GetEncodedID(d.Get("private_link_id").(string), "private_link_id") providerName := d.Get("provider_name").(string) @@ -193,7 +186,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. clusterConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV2.ClustersApi), Timeout: d.Timeout(schema.TimeoutCreate), MinTimeout: 5 * time.Second, Delay: 5 * time.Minute, @@ -286,7 +279,6 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { connV2 := meta.(*config.MongoDBClient).AtlasV2 - connV220240530 := meta.(*config.MongoDBClient).AtlasV220240530 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] @@ -318,7 +310,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag. 
clusterConf := &retry.StateChangeConf{ Pending: []string{"REPEATING", "PENDING"}, Target: []string{"IDLE", "DELETED"}, - Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV220240530.ClustersApi), + Refresh: advancedcluster.ResourceClusterListAdvancedRefreshFunc(ctx, projectID, connV2.ClustersApi), Timeout: d.Timeout(schema.TimeoutDelete), MinTimeout: 5 * time.Second, Delay: 5 * time.Minute, diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go index e7df9475d4..f4166db1d6 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/data_source_privatelink_endpoint_service_data_federation_online_archives.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/datalakepipeline" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const errorPrivateEndpointServiceDataFederationOnlineArchiveList = "error reading Private Endpoings for projectId %s: %s" diff --git a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go index 0ae4f26ef8..2479acd3d0 100644 --- a/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go +++ b/internal/service/privatelinkendpointservicedatafederationonlinearchive/resource_privatelink_endpoint_service_data_federation_online_archive.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" diff --git a/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go b/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go index c162674bb5..d296549603 100644 --- a/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go +++ b/internal/service/privatelinkendpointserviceserverless/data_source_privatelink_endpoints_service_serverless.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go index 4f87ceecfc..b2bf1fc489 100644 --- 
a/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go +++ b/internal/service/privatelinkendpointserviceserverless/resource_privatelink_endpoint_service_serverless.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/project/data_source_project.go b/internal/service/project/data_source_project.go index 30bd51cdf8..085455f9dc 100644 --- a/internal/service/project/data_source_project.go +++ b/internal/service/project/data_source_project.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/datasource" diff --git a/internal/service/project/data_source_projects.go b/internal/service/project/data_source_projects.go index e2d17eb7a4..ce18dbdf39 100644 --- a/internal/service/project/data_source_projects.go +++ b/internal/service/project/data_source_projects.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const projectsDataSourceName = "projects" diff --git a/internal/service/project/model_project.go b/internal/service/project/model_project.go index 2a1ffd8e3b..349cf824bf 100644 --- a/internal/service/project/model_project.go +++ b/internal/service/project/model_project.go @@ -3,7 +3,7 @@ package project import ( "context" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" diff --git a/internal/service/project/model_project_test.go b/internal/service/project/model_project_test.go index ec139f2309..4a8a0a3928 100644 --- a/internal/service/project/model_project_test.go +++ b/internal/service/project/model_project_test.go @@ -4,7 +4,7 @@ import ( "context" "testing" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" diff --git a/internal/service/project/resource_project.go b/internal/service/project/resource_project.go index bc7671807d..e826dacc07 100644 --- a/internal/service/project/resource_project.go +++ b/internal/service/project/resource_project.go @@ -9,7 +9,7 @@ import ( "sort" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/path" diff --git a/internal/service/project/resource_project_migration_test.go b/internal/service/project/resource_project_migration_test.go index 76b5042f63..a6f48dde23 100644 --- a/internal/service/project/resource_project_migration_test.go +++ b/internal/service/project/resource_project_migration_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" diff --git a/internal/service/project/resource_project_test.go b/internal/service/project/resource_project_test.go index ef63f2852b..a4819cbde4 100644 --- a/internal/service/project/resource_project_test.go +++ b/internal/service/project/resource_project_test.go @@ -11,8 +11,8 @@ import ( "strings" "testing" - "go.mongodb.org/atlas-sdk/v20240805001/admin" - "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "go.mongodb.org/atlas-sdk/v20240805003/mockadmin" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" diff --git a/internal/service/projectapikey/data_source_project_api_keys.go b/internal/service/projectapikey/data_source_project_api_keys.go index 55af1f551b..5b8e143c37 100644 --- a/internal/service/projectapikey/data_source_project_api_keys.go +++ b/internal/service/projectapikey/data_source_project_api_keys.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/projectapikey/resource_project_api_key.go b/internal/service/projectapikey/resource_project_api_key.go index f4a6b12c1d..f6bf7d3757 100644 --- a/internal/service/projectapikey/resource_project_api_key.go +++ b/internal/service/projectapikey/resource_project_api_key.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/projectinvitation/resource_project_invitation.go b/internal/service/projectinvitation/resource_project_invitation.go index 8ca1b7c199..37557b3f09 100644 --- a/internal/service/projectinvitation/resource_project_invitation.go +++ b/internal/service/projectinvitation/resource_project_invitation.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func Resource() *schema.Resource { diff --git a/internal/service/projectipaccesslist/model_project_ip_access_list.go b/internal/service/projectipaccesslist/model_project_ip_access_list.go index 12c7bae998..5c1c7076fd 100644 --- a/internal/service/projectipaccesslist/model_project_ip_access_list.go +++ b/internal/service/projectipaccesslist/model_project_ip_access_list.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewMongoDBProjectIPAccessList(projectIPAccessListModel *TfProjectIPAccessListModel) *[]admin.NetworkPermissionEntry { diff --git 
a/internal/service/projectipaccesslist/model_project_ip_access_list_test.go b/internal/service/projectipaccesslist/model_project_ip_access_list_test.go index e51f4a4787..a506d05713 100644 --- a/internal/service/projectipaccesslist/model_project_ip_access_list_test.go +++ b/internal/service/projectipaccesslist/model_project_ip_access_list_test.go @@ -9,7 +9,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/projectipaccesslist" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var ( diff --git a/internal/service/projectipaccesslist/resource_project_ip_access_list.go b/internal/service/projectipaccesslist/resource_project_ip_access_list.go index 144d587ce1..8be3c3bf8d 100644 --- a/internal/service/projectipaccesslist/resource_project_ip_access_list.go +++ b/internal/service/projectipaccesslist/resource_project_ip_access_list.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" diff --git a/internal/service/pushbasedlogexport/model.go b/internal/service/pushbasedlogexport/model.go index 0238196c0b..3c2106b7fd 100644 --- a/internal/service/pushbasedlogexport/model.go +++ b/internal/service/pushbasedlogexport/model.go @@ -3,7 +3,7 @@ package pushbasedlogexport import ( "context" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/diag" diff --git a/internal/service/pushbasedlogexport/model_test.go b/internal/service/pushbasedlogexport/model_test.go index c0523a6c00..a967e3dd7c 100644 --- a/internal/service/pushbasedlogexport/model_test.go +++ b/internal/service/pushbasedlogexport/model_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/types" diff --git a/internal/service/pushbasedlogexport/resource.go b/internal/service/pushbasedlogexport/resource.go index dfebae9189..dc35ad1c5a 100644 --- a/internal/service/pushbasedlogexport/resource.go +++ b/internal/service/pushbasedlogexport/resource.go @@ -7,7 +7,7 @@ import ( "slices" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" diff --git a/internal/service/pushbasedlogexport/state_transition.go b/internal/service/pushbasedlogexport/state_transition.go index e8c1283339..84f5c23c2e 100644 --- a/internal/service/pushbasedlogexport/state_transition.go +++ b/internal/service/pushbasedlogexport/state_transition.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" diff --git a/internal/service/pushbasedlogexport/state_transition_test.go 
b/internal/service/pushbasedlogexport/state_transition_test.go index d49f0757b3..20d725bfdb 100644 --- a/internal/service/pushbasedlogexport/state_transition_test.go +++ b/internal/service/pushbasedlogexport/state_transition_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - "go.mongodb.org/atlas-sdk/v20240805001/admin" - "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "go.mongodb.org/atlas-sdk/v20240805003/mockadmin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" diff --git a/internal/service/searchdeployment/model_search_deployment.go b/internal/service/searchdeployment/model_search_deployment.go index 8548aacf19..90c6fcaa5b 100644 --- a/internal/service/searchdeployment/model_search_deployment.go +++ b/internal/service/searchdeployment/model_search_deployment.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewSearchDeploymentReq(ctx context.Context, searchDeploymentPlan *TFSearchDeploymentRSModel) admin.ApiSearchDeploymentRequest { diff --git a/internal/service/searchdeployment/model_search_deployment_test.go b/internal/service/searchdeployment/model_search_deployment_test.go index e82b8a6ff7..716be9b7d7 100644 --- a/internal/service/searchdeployment/model_search_deployment_test.go +++ b/internal/service/searchdeployment/model_search_deployment_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) type sdkToTFModelTestCase struct { diff --git a/internal/service/searchdeployment/state_transition_search_deployment.go b/internal/service/searchdeployment/state_transition_search_deployment.go index 98c992be4c..5da0158914 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment.go +++ b/internal/service/searchdeployment/state_transition_search_deployment.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/retrystrategy" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const SearchDeploymentDoesNotExistsError = "ATLAS_SEARCH_DEPLOYMENT_DOES_NOT_EXIST" diff --git a/internal/service/searchdeployment/state_transition_search_deployment_test.go b/internal/service/searchdeployment/state_transition_search_deployment_test.go index a004a1e4eb..8024a56771 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment_test.go +++ b/internal/service/searchdeployment/state_transition_search_deployment_test.go @@ -12,8 +12,8 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.mongodb.org/atlas-sdk/v20240805001/admin" - "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "go.mongodb.org/atlas-sdk/v20240805003/mockadmin" ) var ( diff --git 
a/internal/service/searchindex/data_source_search_indexes.go b/internal/service/searchindex/data_source_search_indexes.go index d3bd55bc8f..951fc6fc25 100644 --- a/internal/service/searchindex/data_source_search_indexes.go +++ b/internal/service/searchindex/data_source_search_indexes.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/searchindex/model_search_index.go b/internal/service/searchindex/model_search_index.go index 85878f5cab..fdad9a06e0 100644 --- a/internal/service/searchindex/model_search_index.go +++ b/internal/service/searchindex/model_search_index.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/schemafunc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func flattenSearchIndexSynonyms(synonyms []admin.SearchSynonymMappingDefinition) []map[string]any { diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index 559202413b..f36233f052 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/serverlessinstance/data_source_serverless_instances.go b/internal/service/serverlessinstance/data_source_serverless_instances.go index 52f089258e..b2aacc74a5 100644 --- a/internal/service/serverlessinstance/data_source_serverless_instances.go +++ b/internal/service/serverlessinstance/data_source_serverless_instances.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/serverlessinstance/resource_serverless_instance.go b/internal/service/serverlessinstance/resource_serverless_instance.go index 828a7eaa03..c89b9b1abf 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance.go +++ b/internal/service/serverlessinstance/resource_serverless_instance.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/serverlessinstance/resource_serverless_instance_test.go 
b/internal/service/serverlessinstance/resource_serverless_instance_test.go index a527d70629..c7b92a7102 100644 --- a/internal/service/serverlessinstance/resource_serverless_instance_test.go +++ b/internal/service/serverlessinstance/resource_serverless_instance_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go b/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go index 112ecf1086..d6b4ce47fa 100644 --- a/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go +++ b/internal/service/sharedtier/data_source_cloud_shared_tier_restore_jobs.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/sharedtier/data_source_shared_tier_snapshots.go b/internal/service/sharedtier/data_source_shared_tier_snapshots.go index 7654136b99..716577d600 100644 --- a/internal/service/sharedtier/data_source_shared_tier_snapshots.go +++ b/internal/service/sharedtier/data_source_shared_tier_snapshots.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" diff --git a/internal/service/streamconnection/data_source_stream_connections.go b/internal/service/streamconnection/data_source_stream_connections.go index 3800fc1052..3ac0eb0228 100644 --- a/internal/service/streamconnection/data_source_stream_connections.go +++ b/internal/service/streamconnection/data_source_stream_connections.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var _ datasource.DataSource = &streamConnectionsDS{} diff --git a/internal/service/streamconnection/data_source_stream_connections_test.go b/internal/service/streamconnection/data_source_stream_connections_test.go index 47af9736cc..e3685654b8 100644 --- a/internal/service/streamconnection/data_source_stream_connections_test.go +++ b/internal/service/streamconnection/data_source_stream_connections_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestAccStreamDSStreamConnections_basic(t *testing.T) { diff --git a/internal/service/streamconnection/model_stream_connection.go b/internal/service/streamconnection/model_stream_connection.go index 142efd7146..bfff03b0d2 100644 --- a/internal/service/streamconnection/model_stream_connection.go +++ b/internal/service/streamconnection/model_stream_connection.go @@ -9,7 +9,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewStreamConnectionReq(ctx context.Context, plan *TFStreamConnectionModel) (*admin.StreamsConnection, diag.Diagnostics) { diff --git a/internal/service/streamconnection/model_stream_connection_test.go b/internal/service/streamconnection/model_stream_connection_test.go index c60e122983..bf8de03933 100644 --- a/internal/service/streamconnection/model_stream_connection_test.go +++ b/internal/service/streamconnection/model_stream_connection_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamconnection" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/streaminstance/data_source_stream_instances.go b/internal/service/streaminstance/data_source_stream_instances.go index 898ffc3ae3..4159388c6d 100644 --- a/internal/service/streaminstance/data_source_stream_instances.go +++ b/internal/service/streaminstance/data_source_stream_instances.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var _ datasource.DataSource = &streamInstancesDS{} diff --git a/internal/service/streaminstance/data_source_stream_instances_test.go b/internal/service/streaminstance/data_source_stream_instances_test.go index 37b952ad9b..94dc32f21f 100644 --- a/internal/service/streaminstance/data_source_stream_instances_test.go +++ b/internal/service/streaminstance/data_source_stream_instances_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func TestAccStreamDSStreamInstances_basic(t *testing.T) { diff --git a/internal/service/streaminstance/model_stream_instance.go b/internal/service/streaminstance/model_stream_instance.go index e11f7f3c06..f8b4c4d7f4 100644 --- a/internal/service/streaminstance/model_stream_instance.go +++ b/internal/service/streaminstance/model_stream_instance.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewStreamInstanceCreateReq(ctx context.Context, plan *TFStreamInstanceModel) (*admin.StreamsTenant, diag.Diagnostics) { diff --git a/internal/service/streaminstance/model_stream_instance_test.go b/internal/service/streaminstance/model_stream_instance_test.go index 94d69cb194..3e9ed8b95e 100644 --- a/internal/service/streaminstance/model_stream_instance_test.go +++ b/internal/service/streaminstance/model_stream_instance_test.go @@ -7,7 +7,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streaminstance" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/streamprocessor/data_source_plural.go b/internal/service/streamprocessor/data_source_plural.go index f462756706..faec4e6a82 100644 --- a/internal/service/streamprocessor/data_source_plural.go +++ b/internal/service/streamprocessor/data_source_plural.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func (d *streamProcessorsDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { diff --git a/internal/service/streamprocessor/model.go b/internal/service/streamprocessor/model.go index cba2619fd5..f35a5eb492 100644 --- a/internal/service/streamprocessor/model.go +++ b/internal/service/streamprocessor/model.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/fwtypes" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func NewStreamProcessorReq(ctx context.Context, plan *TFStreamProcessorRSModel) (*admin.StreamsProcessor, diag.Diagnostics) { diff --git a/internal/service/streamprocessor/model_test.go b/internal/service/streamprocessor/model_test.go index 46f1857e1f..9ba6cb6da9 100644 --- a/internal/service/streamprocessor/model_test.go +++ b/internal/service/streamprocessor/model_test.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/schemafunc" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamprocessor" "github.com/stretchr/testify/assert" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var ( diff --git a/internal/service/streamprocessor/resource.go b/internal/service/streamprocessor/resource.go index 5ed930161b..31104f7e21 100644 --- a/internal/service/streamprocessor/resource.go +++ b/internal/service/streamprocessor/resource.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const StreamProcessorName = "stream_processor" diff --git a/internal/service/streamprocessor/state_transition.go b/internal/service/streamprocessor/state_transition.go index 052c804341..66bfc62289 100644 --- a/internal/service/streamprocessor/state_transition.go +++ b/internal/service/streamprocessor/state_transition.go @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/streamprocessor/state_transition_test.go b/internal/service/streamprocessor/state_transition_test.go index 62872712b4..783e41006a 100644 --- a/internal/service/streamprocessor/state_transition_test.go +++ b/internal/service/streamprocessor/state_transition_test.go @@ -6,8 +6,8 @@ 
import ( "net/http" "testing" - "go.mongodb.org/atlas-sdk/v20240805001/admin" - "go.mongodb.org/atlas-sdk/v20240805001/mockadmin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" + "go.mongodb.org/atlas-sdk/v20240805003/mockadmin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" diff --git a/internal/service/team/data_source_team.go b/internal/service/team/data_source_team.go index 6ac8288b76..889d4b09bb 100644 --- a/internal/service/team/data_source_team.go +++ b/internal/service/team/data_source_team.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func DataSource() *schema.Resource { diff --git a/internal/service/team/resource_team.go b/internal/service/team/resource_team.go index 3fd175f037..a824e6d189 100644 --- a/internal/service/team/resource_team.go +++ b/internal/service/team/resource_team.go @@ -15,7 +15,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/service/thirdpartyintegration/data_source_third_party_integration.go b/internal/service/thirdpartyintegration/data_source_third_party_integration.go index f579223c04..d26e9f581b 100644 --- a/internal/service/thirdpartyintegration/data_source_third_party_integration.go +++ b/internal/service/thirdpartyintegration/data_source_third_party_integration.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -98,11 +97,6 @@ func thirdPartyIntegrationSchema() *schema.Resource { Sensitive: true, Optional: true, }, - "scheme": { - Type: schema.TypeString, - Optional: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, "enabled": { Type: schema.TypeBool, Optional: true, diff --git a/internal/service/thirdpartyintegration/data_source_third_party_integrations.go b/internal/service/thirdpartyintegration/data_source_third_party_integrations.go index a4d5fcca9f..2ad05b6331 100644 --- a/internal/service/thirdpartyintegration/data_source_third_party_integrations.go +++ b/internal/service/thirdpartyintegration/data_source_third_party_integrations.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func PluralDataSource() *schema.Resource { diff --git a/internal/service/thirdpartyintegration/resource_third_party_integration.go b/internal/service/thirdpartyintegration/resource_third_party_integration.go index b34293a077..13f2a3071d 100644 --- a/internal/service/thirdpartyintegration/resource_third_party_integration.go +++ b/internal/service/thirdpartyintegration/resource_third_party_integration.go @@ -9,7 +9,6 @@ import ( 
"github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -125,12 +124,6 @@ func Resource() *schema.Resource { Computed: true, Sensitive: true, }, - "scheme": { - Type: schema.TypeString, - Computed: true, - Optional: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamByVersion, "1.18.0"), - }, "enabled": { Type: schema.TypeBool, Computed: true, diff --git a/internal/service/thirdpartyintegration/resource_third_party_integration_test.go b/internal/service/thirdpartyintegration/resource_third_party_integration_test.go index c73f0f534c..e706608f30 100644 --- a/internal/service/thirdpartyintegration/resource_third_party_integration_test.go +++ b/internal/service/thirdpartyintegration/resource_third_party_integration_test.go @@ -210,7 +210,6 @@ func prometheusTest(tb testing.TB) *resource.TestCase { updatedUsername = "otheruser" password = "somepassword" serviceDiscovery = "http" - scheme = "https" intType = "PROMETHEUS" ) return &resource.TestCase{ @@ -219,21 +218,20 @@ func prometheusTest(tb testing.TB) *resource.TestCase { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configPrometheus(projectID, username, password, serviceDiscovery, scheme), + Config: configPrometheus(projectID, username, password, serviceDiscovery), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), resource.TestCheckResourceAttr(resourceName, "user_name", username), resource.TestCheckResourceAttr(resourceName, "password", password), resource.TestCheckResourceAttr(resourceName, "service_discovery", serviceDiscovery), - resource.TestCheckResourceAttr(resourceName, "scheme", scheme), resource.TestCheckResourceAttr(dataSourceName, "type", intType), resource.TestCheckResourceAttr(dataSourceName, "user_name", username), resource.TestCheckResourceAttr(dataSourceName, "service_discovery", serviceDiscovery), ), }, { - Config: configPrometheus(projectID, updatedUsername, password, serviceDiscovery, scheme), + Config: configPrometheus(projectID, updatedUsername, password, serviceDiscovery), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "type", intType), @@ -442,7 +440,7 @@ func configDatadog(projectID, apiKey, region string) string { ) + singularDataStr } -func configPrometheus(projectID, username, password, serviceDiscovery, scheme string) string { +func configPrometheus(projectID, username, password, serviceDiscovery string) string { return fmt.Sprintf(` resource "mongodbatlas_third_party_integration" "test" { project_id = "%[1]s" @@ -450,7 +448,6 @@ func configPrometheus(projectID, username, password, serviceDiscovery, scheme st user_name = "%[3]s" password = "%[4]s" service_discovery = "%[5]s" - scheme = "%[6]s" enabled = true } `, @@ -459,7 +456,6 @@ func configPrometheus(projectID, username, password, serviceDiscovery, scheme st username, password, serviceDiscovery, - scheme, ) + singularDataStr } diff --git a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go index 0044516409..35dccf6a41 100644 --- 
a/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go +++ b/internal/service/x509authenticationdatabaseuser/resource_x509_authentication_database_user.go @@ -11,7 +11,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/spf13/cast" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( diff --git a/internal/testutil/acc/advanced_cluster.go b/internal/testutil/acc/advanced_cluster.go index 45ccad7a9e..4e250af483 100644 --- a/internal/testutil/acc/advanced_cluster.go +++ b/internal/testutil/acc/advanced_cluster.go @@ -3,9 +3,12 @@ package acc import ( "context" "fmt" + "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) var ( @@ -50,3 +53,17 @@ func ImportStateClusterIDFunc(resourceName string) resource.ImportStateIdFunc { return fmt.Sprintf("%s-%s", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["name"]), nil } } + +func CheckClusterExistsHandlingRetry(projectID, clusterName string) error { + return retry.RetryContext(context.Background(), 3*time.Minute, func() *retry.RetryError { + _, _, err := ConnV2().ClustersApi.GetCluster(context.Background(), projectID, clusterName).Execute() + if apiError, ok := admin.AsError(err); ok { + if apiError.GetErrorCode() == "SERVICE_UNAVAILABLE" { + // retry the get operation: in migration tests this can be the first time the new API is called for a cluster, so the API may respond with a temporary error while it transitions to enabling the independent shard scaling (ISS) feature flag + return retry.RetryableError(err) + } + return retry.NonRetryableError(err) + } + return nil + }) +} diff --git a/internal/testutil/acc/atlas.go b/internal/testutil/acc/atlas.go index 2fa121b10f..18ac6d7aac 100644 --- a/internal/testutil/acc/atlas.go +++ b/internal/testutil/acc/atlas.go @@ -10,7 +10,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func createProject(tb testing.TB, name string) string { @@ -38,7 +38,7 @@ func createCluster(tb testing.TB, projectID, name string) string { _, _, err := ConnV2().ClustersApi.CreateCluster(context.Background(), projectID, &req).Execute() require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err) - stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV220240530(), projectID, name, 1*time.Hour) + stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour) _, err = stateConf.WaitForStateContext(context.Background()) require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err) @@ -50,7 +50,7 @@ func deleteCluster(projectID, name string) { if err != nil { fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err) } - stateConf := advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV220240530(), projectID, name, 1*time.Hour) + stateConf := advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour) _, err =
stateConf.WaitForStateContext(context.Background()) if err != nil { fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err) diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index 615a5430a1..00a29a0fe8 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -7,7 +7,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) // ClusterRequest contains configuration for a cluster where all fields are optional and AddDefaults is used for required fields. diff --git a/internal/testutil/acc/config_cluster.go b/internal/testutil/acc/config_cluster.go index c21501224d..9a34521bc3 100644 --- a/internal/testutil/acc/config_cluster.go +++ b/internal/testutil/acc/config_cluster.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func ClusterDatasourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { diff --git a/internal/testutil/acc/database_user.go b/internal/testutil/acc/database_user.go index 4189186e73..60778b4637 100644 --- a/internal/testutil/acc/database_user.go +++ b/internal/testutil/acc/database_user.go @@ -3,7 +3,7 @@ package acc import ( "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func ConfigDatabaseUserBasic(projectID, username, roleName, keyLabel, valueLabel string) string { diff --git a/internal/testutil/acc/factory.go b/internal/testutil/acc/factory.go index 608615d2ab..43a0e6028f 100644 --- a/internal/testutil/acc/factory.go +++ b/internal/testutil/acc/factory.go @@ -9,8 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/provider" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) const ( @@ -40,10 +39,6 @@ func ConnV2() *admin.APIClient { return MongoDBClient.AtlasV2 } -func ConnV220240530() *admin20240530.APIClient { - return MongoDBClient.AtlasV220240530 -} - func ConnV2UsingProxy(proxyPort *int) *admin.APIClient { cfg := config.Config{ PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), diff --git a/internal/testutil/acc/project.go b/internal/testutil/acc/project.go index 1f4b1bbe35..3d8435fed4 100644 --- a/internal/testutil/acc/project.go +++ b/internal/testutil/acc/project.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func CheckDestroyProject(s *terraform.State) error { diff --git a/internal/testutil/acc/serverless.go b/internal/testutil/acc/serverless.go index 0453b5af57..114d8aadf6 100644 --- a/internal/testutil/acc/serverless.go +++ b/internal/testutil/acc/serverless.go @@ -3,7 +3,7 @@ package acc import ( "fmt" - "go.mongodb.org/atlas-sdk/v20240805001/admin" + "go.mongodb.org/atlas-sdk/v20240805003/admin" ) func ConfigServerlessInstance(projectID, name string, ignoreConnectionStrings bool, 
autoIndexing *bool, tags []admin.ResourceTag) string { diff --git a/scripts/changelog/release-note.tmpl b/scripts/changelog/release-note.tmpl index 78ae791587..e9c9271a6c 100644 --- a/scripts/changelog/release-note.tmpl +++ b/scripts/changelog/release-note.tmpl @@ -4,7 +4,7 @@ {{- else if eq "new-datasource" .Type -}} * **New Data Source:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/{{- .Issue -}})) {{- else if eq "new-guide" .Type -}} -* **New Guide:** `{{.Body}}` ([#{{- .Issue -}}](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/{{- .Issue -}})) +* **New Guide:** {{.Body}} ([#{{- .Issue -}}](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/{{- .Issue -}})) {{- else -}} * {{.Body}} ([#{{- .Issue -}}](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/{{- .Issue -}})) {{- end -}} diff --git a/templates/resources/push_based_log_export.md.tmpl b/templates/resources/push_based_log_export.md.tmpl index aadfb9d954..53f17c8e32 100644 --- a/templates/resources/push_based_log_export.md.tmpl +++ b/templates/resources/push_based_log_export.md.tmpl @@ -3,6 +3,8 @@ `{{.Name}}` provides a resource for push-based log export feature. The resource lets you configure, enable & disable the project level settings for the push-based log export feature. Using this resource you can continually push logs from mongod, mongos, and audit logs to an Amazon S3 bucket. Atlas exports logs every 5 minutes. +The [push-based log export Terraform module](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/push-based-log-export/mongodbatlas/latest) uses this resource and simplifies its setup. + ## Example Usages
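For orientation, a minimal sketch of how the `mongodbatlas_push_based_log_export` resource documented in that template might be configured is shown below. The bucket name, role reference, and `prefix_path` value are illustrative placeholders, and the cloud provider access setup that authorizes the IAM role for Atlas is assumed to exist elsewhere in the configuration.

```terraform
# Minimal sketch (placeholder values): enable push-based log export for a project.
resource "mongodbatlas_push_based_log_export" "this" {
  project_id  = var.project_id                      # Atlas project whose logs are exported
  bucket_name = "example-atlas-log-export-bucket"   # S3 bucket the authorized IAM role can write to
  iam_role_id = var.cloud_provider_access_role_id   # role ID from a pre-existing cloud provider access setup
  prefix_path = "push-based-log-export"             # optional key prefix for exported log objects
}
```

Per the sentence added to the template, the linked module is intended to simplify this wiring for common cases.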