diff --git a/.changelog/2585.txt b/.changelog/2585.txt new file mode 100644 index 0000000000..5b474f0050 --- /dev/null +++ b/.changelog/2585.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +resource/mongodbatlas_resource_policy +``` diff --git a/.changelog/2598.txt b/.changelog/2598.txt new file mode 100644 index 0000000000..9728b4a224 --- /dev/null +++ b/.changelog/2598.txt @@ -0,0 +1,7 @@ +```release-note:new-datasource +data-source/mongodbatlas_resource_policy +``` + +```release-note:new-datasource +data-source/mongodbatlas_resource_policies +``` diff --git a/.changelog/2600.txt b/.changelog/2600.txt new file mode 100644 index 0000000000..7cf97636b7 --- /dev/null +++ b/.changelog/2600.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/mongodbatlas_advanced_cluster: Supports `redact_client_log_data` attribute +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_cluster: Supports `redact_client_log_data` attribute +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_clusters: Supports `redact_client_log_data` attribute +``` diff --git a/.changelog/2601.txt b/.changelog/2601.txt new file mode 100644 index 0000000000..e7c27a052b --- /dev/null +++ b/.changelog/2601.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/mongodbatlas_cluster: Supports `redact_client_log_data` attribute +``` + +```release-note:enhancement +data-source/mongodbatlas_cluster: Supports `redact_client_log_data` attribute +``` + +```release-note:enhancement +data-source/mongodbatlas_clusters: Supports `redact_client_log_data` attribute +``` diff --git a/.changelog/2669.txt b/.changelog/2669.txt new file mode 100644 index 0000000000..387213d465 --- /dev/null +++ b/.changelog/2669.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_team: Fixes update logic of `usernames` attribute ensuring team is never emptied +``` diff --git a/.changelog/2670.txt b/.changelog/2670.txt new file mode 100644 index 0000000000..981dbefa4d --- /dev/null +++ b/.changelog/2670.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields +``` + +```release-note:enhancement +data-source/mongodbatlas_advanced_clusters: Adds new `config_server_management_mode` and `config_server_type` fields +``` diff --git a/.changelog/2684.txt b/.changelog/2684.txt new file mode 100644 index 0000000000..26737c7b74 --- /dev/null +++ b/.changelog/2684.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_organization: Avoids inconsistent result returned by provider when `USER_NOT_FOUND` +``` diff --git a/.changelog/2690.txt b/.changelog/2690.txt new file mode 100644 index 0000000000..75b92e277a --- /dev/null +++ b/.changelog/2690.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_event_trigger: Always includes `disabled` in the PUT payload +``` diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index b2c7bf5ce0..5e64956b37 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -38,9 +38,6 @@ on: mongodb_atlas_teams_ids: type: string required: true - mongodb_atlas_username: - type: string - required: true azure_atlas_app_id: type: string required: true @@ -80,6 +77,9 @@ on: mongodb_atlas_gov_base_url: type: string 
required: true + mongodb_atlas_rp_org_id: + type: string + required: true mongodb_atlas_gov_project_owner_id: type: string required: true @@ -134,6 +134,10 @@ on: required: true mongodb_atlas_gov_public_key: required: true + mongodb_atlas_rp_private_key: + required: true + mongodb_atlas_rp_public_key: + required: true azure_directory_id: required: true azure_resource_group_name: @@ -183,7 +187,7 @@ jobs: provider_version: ${{ inputs.provider_version || steps.get_last_release.outputs.last_provider_version }} steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Get Last Release id: get_last_release shell: bash @@ -199,6 +203,7 @@ jobs: mustTrigger: ${{ github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && inputs.test_group == '' ) }} outputs: # ensure resources are sorted alphabetically advanced_cluster: ${{ steps.filter.outputs.advanced_cluster == 'true' || env.mustTrigger == 'true' }} + advanced_cluster_tpf: ${{ steps.filter.outputs.advanced_cluster_tpf == 'true' || env.mustTrigger == 'true' }} assume_role: ${{ steps.filter.outputs.assume_role == 'true' || env.mustTrigger == 'true' }} backup: ${{ steps.filter.outputs.backup == 'true' || env.mustTrigger == 'true' }} control_plane_ip_addresses: ${{ steps.filter.outputs.control_plane_ip_addresses == 'true' || env.mustTrigger == 'true' }} @@ -215,12 +220,13 @@ jobs: network: ${{ steps.filter.outputs.network == 'true' || env.mustTrigger == 'true' }} project: ${{ steps.filter.outputs.project == 'true' || env.mustTrigger == 'true' }} push_based_log_export: ${{ steps.filter.outputs.push_based_log_export == 'true' || env.mustTrigger == 'true' }} + resource_policy: ${{ steps.filter.outputs.resource_policy == 'true' || env.mustTrigger == 'true' }} search_deployment: ${{ steps.filter.outputs.search_deployment == 'true' || env.mustTrigger == 'true' }} search_index: ${{ steps.filter.outputs.search_index == 'true' || env.mustTrigger == 'true' }} serverless: ${{ steps.filter.outputs.serverless == 'true' || env.mustTrigger == 'true' }} stream: ${{ steps.filter.outputs.stream == 'true' || env.mustTrigger == 'true' }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 id: filter if: ${{ inputs.test_group == '' && env.mustTrigger == 'false' }} @@ -228,6 +234,8 @@ jobs: filters: | advanced_cluster: - 'internal/service/advancedcluster/*.go' + advanced_cluster_tpf: + - 'internal/service/advancedclustertpf/*.go' assume_role: - 'internal/provider/*.go' backup: @@ -295,6 +303,8 @@ jobs: - 'internal/service/projectipaddresses/*.go' push_based_log_export: - 'internal/service/pushbasedlogexport/*.go' + resource_policy: + - 'internal/service/resourcepolicy/*.go' search_deployment: - 'internal/service/searchdeployment/*.go' search_index: @@ -314,7 +324,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -330,13 +340,37 @@ jobs: ACCTEST_PACKAGES: ./internal/service/advancedcluster run: make testacc + advanced_cluster_tpf: + needs: [ change-detection, get-provider-version ] + if: ${{ 
needs.change-detection.outputs.advanced_cluster_tpf == 'true' || inputs.test_group == 'advanced_cluster_tpf' }} + runs-on: ubuntu-latest + permissions: {} + steps: + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 + with: + ref: ${{ inputs.ref || github.ref }} + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 + with: + go-version-file: 'go.mod' + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd + with: + terraform_version: ${{ inputs.terraform_version }} + terraform_wrapper: false + - name: Prepare new advanced_cluster + run: make tools enable-advancedclustertpf + - name: Acceptance Tests + env: + MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} + ACCTEST_PACKAGES: ./internal/service/advancedclustertpf + run: make testacc + assume_role: needs: [ change-detection, get-provider-version ] if: ${{ needs.change-detection.outputs.assume_role == 'true' || inputs.test_group == 'assume_role' }} runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -376,7 +410,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -414,7 +448,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -436,7 +470,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -458,7 +492,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -480,7 +514,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -493,7 +527,8 @@ jobs: - name: Acceptance Tests env: MONGODB_ATLAS_PROJECT_OWNER_ID: ${{ inputs.mongodb_atlas_project_owner_id }} - MONGODB_ATLAS_USERNAME: ${{ inputs.mongodb_atlas_username }} + MONGODB_ATLAS_USERNAME: ${{ vars.MONGODB_ATLAS_USERNAME }} + MONGODB_ATLAS_USERNAME_2: ${{ vars.MONGODB_ATLAS_USERNAME_2 }} AZURE_ATLAS_APP_ID: ${{ inputs.azure_atlas_app_id }} AZURE_SERVICE_PRINCIPAL_ID: ${{ inputs.azure_service_principal_id }} AZURE_TENANT_ID: ${{ inputs.azure_tenant_id }} @@ -525,7 +560,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: 
actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -561,7 +596,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -583,7 +618,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -621,7 +656,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -648,7 +683,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -670,7 +705,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -699,7 +734,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -721,7 +756,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -766,7 +801,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -804,7 +839,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -823,13 +858,13 @@ jobs: ACCTEST_PACKAGES: ./internal/service/pushbasedlogexport run: make testacc - search_deployment: + resource_policy: needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.search_deployment == 'true' || inputs.test_group == 'search_deployment' }} + if: ${{ needs.change-detection.outputs.resource_policy == 'true' || inputs.test_group == 'resource_policy' }} runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 
+ - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -841,17 +876,22 @@ jobs: terraform_wrapper: false - name: Acceptance Tests env: + MONGODB_ATLAS_ORG_ID: ${{ inputs.mongodb_atlas_rp_org_id }} + MONGODB_ATLAS_PUBLIC_KEY: ${{ secrets.mongodb_atlas_rp_public_key }} + MONGODB_ATLAS_PRIVATE_KEY: ${{ secrets.mongodb_atlas_rp_private_key }} MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} - ACCTEST_PACKAGES: ./internal/service/searchdeployment + MONGODB_ATLAS_ENABLE_PREVIEW: "true" + ACCTEST_PACKAGES: | + ./internal/service/resourcepolicy run: make testacc - - search_index: + + search_deployment: needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.search_index == 'true' || inputs.test_group == 'search_index' }} + if: ${{ needs.change-detection.outputs.search_deployment == 'true' || inputs.test_group == 'search_deployment' }} runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -860,20 +900,20 @@ jobs: - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} - terraform_wrapper: false + terraform_wrapper: false - name: Acceptance Tests env: MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} - ACCTEST_PACKAGES: ./internal/service/searchindex + ACCTEST_PACKAGES: ./internal/service/searchdeployment run: make testacc - - serverless: + + search_index: needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.serverless == 'true' || inputs.test_group == 'serverless' }} + if: ${{ needs.change-detection.outputs.search_index == 'true' || inputs.test_group == 'search_index' }} runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -885,22 +925,17 @@ jobs: terraform_wrapper: false - name: Acceptance Tests env: - AWS_ACCESS_KEY_ID: ${{ secrets.aws_access_key_id }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.aws_secret_access_key }} MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} - ACCTEST_PACKAGES: | - ./internal/service/privatelinkendpointserverless - ./internal/service/privatelinkendpointserviceserverless - ./internal/service/serverlessinstance + ACCTEST_PACKAGES: ./internal/service/searchindex run: make testacc - stream: + serverless: needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.stream == 'true' || inputs.test_group == 'stream' }} + if: ${{ needs.change-detection.outputs.serverless == 'true' || inputs.test_group == 'serverless' }} runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.ref || github.ref }} - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 @@ -909,12 +944,38 @@ jobs: - uses: 
hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd with: terraform_version: ${{ inputs.terraform_version }} - terraform_wrapper: false + terraform_wrapper: false - name: Acceptance Tests env: + AWS_ACCESS_KEY_ID: ${{ secrets.aws_access_key_id }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.aws_secret_access_key }} MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} ACCTEST_PACKAGES: | - ./internal/service/streamconnection - ./internal/service/streaminstance - ./internal/service/streamprocessor + ./internal/service/privatelinkendpointserverless + ./internal/service/privatelinkendpointserviceserverless + ./internal/service/serverlessinstance run: make testacc + stream: + needs: [ change-detection, get-provider-version ] + if: ${{ needs.change-detection.outputs.stream == 'true' || inputs.test_group == 'stream' }} + runs-on: ubuntu-latest + permissions: {} + steps: + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 + with: + ref: ${{ inputs.ref || github.ref }} + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 + with: + go-version-file: 'go.mod' + - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd + with: + terraform_version: ${{ inputs.terraform_version }} + terraform_wrapper: false + - name: Acceptance Tests + env: + MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} + ACCTEST_PACKAGES: | + ./internal/service/streamconnection + ./internal/service/streaminstance + ./internal/service/streamprocessor + run: make testacc diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index e15be2a2c4..89affaa5ac 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -57,6 +57,8 @@ jobs: mongodb_atlas_private_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_PRIVATE_KEY_CLOUD_QA || secrets.MONGODB_ATLAS_PRIVATE_KEY_CLOUD_DEV }} mongodb_atlas_gov_public_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_GOV_PUBLIC_KEY_QA || secrets.MONGODB_ATLAS_GOV_PUBLIC_KEY_DEV }} mongodb_atlas_gov_private_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_GOV_PRIVATE_KEY_QA || secrets.MONGODB_ATLAS_GOV_PRIVATE_KEY_DEV }} + mongodb_atlas_rp_public_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_RP_PUBLIC_KEY_QA || secrets.MONGODB_ATLAS_RP_PUBLIC_KEY_DEV }} + mongodb_atlas_rp_private_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_RP_PRIVATE_KEY_QA || secrets.MONGODB_ATLAS_RP_PRIVATE_KEY_DEV }} ca_cert: ${{ secrets.CA_CERT }} aws_account_id: ${{ secrets.AWS_ACCOUNT_ID }} aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} @@ -95,7 +97,6 @@ jobs: mongodb_atlas_base_url: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_BASE_URL_QA || vars.MONGODB_ATLAS_BASE_URL }} mongodb_atlas_project_owner_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_PROJECT_OWNER_ID_QA || vars.MONGODB_ATLAS_PROJECT_OWNER_ID }} mongodb_atlas_teams_ids: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_TEAMS_IDS_QA || vars.MONGODB_ATLAS_TEAMS_IDS }} - mongodb_atlas_username: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_USERNAME_CLOUD_QA || vars.MONGODB_ATLAS_USERNAME_CLOUD_DEV }} azure_atlas_app_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.AZURE_ATLAS_APP_ID_QA || vars.AZURE_ATLAS_APP_ID }} azure_service_principal_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.AZURE_SERVICE_PRINCIPAL_ID_QA || 
vars.AZURE_SERVICE_PRINCIPAL_ID }} azure_tenant_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.AZURE_TENANT_ID_QA || vars.AZURE_TENANT_ID }} @@ -113,3 +114,4 @@ jobs: mongodb_atlas_project_ear_pe_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_PROJECT_EAR_PE_ID_QA || vars.MONGODB_ATLAS_PROJECT_EAR_PE_ID_DEV }} mongodb_atlas_enable_preview: ${{ vars.MONGODB_ATLAS_ENABLE_PREVIEW }} azure_private_endpoint_region: ${{ vars.AZURE_PRIVATE_ENDPOINT_REGION }} + mongodb_atlas_rp_org_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_RP_ORG_ID_QA || vars.MONGODB_ATLAS_RP_ORG_ID_DEV }} diff --git a/.github/workflows/check-changelog-entry-file.yml b/.github/workflows/check-changelog-entry-file.yml index 3f8f275793..6bbcf6e5aa 100644 --- a/.github/workflows/check-changelog-entry-file.yml +++ b/.github/workflows/check-changelog-entry-file.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' diff --git a/.github/workflows/check-migration-guide.yml b/.github/workflows/check-migration-guide.yml index 6a50e14c31..9ea5e908f9 100644 --- a/.github/workflows/check-migration-guide.yml +++ b/.github/workflows/check-migration-guide.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 id: changes with: diff --git a/.github/workflows/cleanup-test-env.yml b/.github/workflows/cleanup-test-env.yml index 5bce1f3c33..fde55e57e0 100644 --- a/.github/workflows/cleanup-test-env.yml +++ b/.github/workflows/cleanup-test-env.yml @@ -11,11 +11,11 @@ jobs: permissions: {} steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: sparse-checkout: | scripts - - uses: mongodb/atlas-github-action@07d212bf80c068dfcfbf6e15b22c61ae6e66d96e + - uses: mongodb/atlas-github-action@15663d068c40a8582d881560961fce9d45e0df9a - name: Cleanup cloud-dev shell: bash env: @@ -29,11 +29,11 @@ jobs: permissions: {} steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: sparse-checkout: | scripts - - uses: mongodb/atlas-github-action@07d212bf80c068dfcfbf6e15b22c61ae6e66d96e + - uses: mongodb/atlas-github-action@15663d068c40a8582d881560961fce9d45e0df9a - name: Cleanup test env network shell: bash env: diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index 13a78afa54..b5f1623f5c 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' @@ -29,7 +29,7 @@ jobs: permissions: pull-requests: write # Needed by sticky-pull-request-comment steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: 
actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' @@ -40,16 +40,16 @@ jobs: permissions: {} steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Install Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' cache: false # see https://github.com/golangci/golangci-lint-action/issues/807 - name: golangci-lint - uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 with: - version: v1.60.3 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version + version: v1.61.0 # Also update GOLANGCI_VERSION variable in GNUmakefile when updating this version - name: actionlint run: | make tools @@ -60,14 +60,14 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Run ShellCheck uses: bewuethr/shellcheck-action@d01912909579c4b1a335828b8fca197fbb8e0aa4 generate-doc-check: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - run: make tools generate-docs-all - name: Find mutations id: self_mutation diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index a0667e5f6e..e5114f0de9 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' @@ -31,7 +31,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' diff --git a/.github/workflows/jira-release-version.yml b/.github/workflows/jira-release-version.yml index bc0c881d00..ae85851e59 100644 --- a/.github/workflows/jira-release-version.yml +++ b/.github/workflows/jira-release-version.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Validation of version format, no pre-releases run: | echo "${{ inputs.version_number }}" | grep -P '^v\d+\.\d+\.\d+$' diff --git a/.github/workflows/notify-docs-team.yml b/.github/workflows/notify-docs-team.yml index 2eac3ba993..e40c9081fe 100644 --- a/.github/workflows/notify-docs-team.yml +++ b/.github/workflows/notify-docs-team.yml @@ -13,7 +13,7 @@ jobs: permissions: pull-requests: read steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 id: changes with: diff --git a/.github/workflows/pull-request-lint.yml b/.github/workflows/pull-request-lint.yml index 0301404cbf..862a78b46e 100644 --- a/.github/workflows/pull-request-lint.yml +++ 
b/.github/workflows/pull-request-lint.yml @@ -75,7 +75,7 @@ jobs: contents: read pull-requests: write # Needed by labeler steps: - - uses: srvaroa/labeler@bfe288801b8091b6a70e67f3ce7c9e87c88921cf + - uses: srvaroa/labeler@29471ee1118fa4e10b011964e6e8fe2fd243e700 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 589edb3967..1da129336a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,7 +41,7 @@ jobs: run: | echo "${{ inputs.version_number }}" | grep -P '^v\d+\.\d+\.\d+(-pre[A-Za-z0-9-]*)?$' - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.use_existing_tag == 'true' && inputs.version_number || 'master' }} - name: Check for Upgrade Guide @@ -94,7 +94,7 @@ jobs: && needs.release-config.outputs.creates_new_tag == 'true' steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: 'master' - name: Get the latest commit SHA @@ -131,7 +131,7 @@ jobs: && !contains(needs.*.result, 'failure') steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: ref: ${{ inputs.version_number }} - name: Set up Go diff --git a/.github/workflows/run-script-and-commit.yml b/.github/workflows/run-script-and-commit.yml index e80f5b0af4..88cbc3bb9e 100644 --- a/.github/workflows/run-script-and-commit.yml +++ b/.github/workflows/run-script-and-commit.yml @@ -29,7 +29,7 @@ jobs: permissions: {} steps: - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 with: fetch-depth: 0 token: ${{ secrets.apix_bot_pat }} diff --git a/.github/workflows/terraform-compatibility-matrix.yml b/.github/workflows/terraform-compatibility-matrix.yml index 7e6799f8f6..d618674b1f 100644 --- a/.github/workflows/terraform-compatibility-matrix.yml +++ b/.github/workflows/terraform-compatibility-matrix.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Get HashiCorp Terraform supported versions shell: bash id: get-terraform-supported-versions @@ -56,7 +56,7 @@ jobs: runs-on: ubuntu-latest permissions: {} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Get content of slack message id: slack-payload run: | diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index a68a520fed..888d547666 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -91,13 +91,13 @@ jobs: with: payload: | { - "text": ":red_circle: Test Suite failed", + "text": ":red_circle: Test Suite failed (reminder to log failures)", "blocks": [ { "type": "section", "text": { "type": "mrkdwn", - "text": "*Test Suite failed* ${{ secrets.SLACK_ONCALL_TAG }}" + "text": "*Test Suite failed (reminder to log failures)* ${{ secrets.SLACK_ONCALL_TAG }}" } }, { diff --git a/.github/workflows/update-sdk.yml b/.github/workflows/update-sdk.yml index a56f2e13d3..5bfb6ebecc 100644 --- a/.github/workflows/update-sdk.yml +++ 
b/.github/workflows/update-sdk.yml @@ -13,14 +13,14 @@ jobs: pull-requests: write steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 with: go-version-file: 'go.mod' - name: Update files run: make tools update-atlas-sdk - name: Verify Changed files - uses: tj-actions/verify-changed-files@c55299aaf3f1f7bf2a782f00ba79897f92432d8b + uses: tj-actions/verify-changed-files@54483a2138ca67989bc40785aa22faee8b085894 id: verify-changed-files - name: Create PR uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f diff --git a/.github/workflows/update_tf_compatibility_matrix.yml b/.github/workflows/update_tf_compatibility_matrix.yml index f1030d6377..195a377889 100644 --- a/.github/workflows/update_tf_compatibility_matrix.yml +++ b/.github/workflows/update_tf_compatibility_matrix.yml @@ -13,13 +13,13 @@ jobs: pull-requests: write steps: - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 + uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 - name: Update files env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: make update-tf-compatibility-matrix - name: Verify Changed files - uses: tj-actions/verify-changed-files@c55299aaf3f1f7bf2a782f00ba79897f92432d8b + uses: tj-actions/verify-changed-files@54483a2138ca67989bc40785aa22faee8b085894 id: verify-changed-files - name: Create PR uses: peter-evans/create-pull-request@5e914681df9dc83aa4e4905692ca88beb2f9e91f diff --git a/.golangci.yml b/.golangci.yml index 3620dfb765..83b0802fad 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -123,6 +123,8 @@ issues: - linters: - gocritic text: "^hugeParam: req is heavy" + - path: "_schema\\.go" # exclude rules for schema files as it's auto-generated from OpenAPI spec + text: "fieldalignment|hugeParam|var-naming|ST1003|S1007|exceeds the maximum|too long|regexpSimplify|nolint" run: timeout: 10m tests: true diff --git a/.tool-versions b/.tool-versions index 46456547b9..ae2c800be9 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1 @@ -golang 1.23.0 -terraform 1.9.5 +terraform 1.9.7 diff --git a/CHANGELOG.md b/CHANGELOG.md index d3ffb74e27..078af522ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,39 @@ ## (Unreleased) +ENHANCEMENTS: + +* data-source/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields ([#2670](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2670)) +* data-source/mongodbatlas_advanced_clusters: Adds new `config_server_management_mode` and `config_server_type` fields ([#2670](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2670)) +* resource/mongodbatlas_advanced_cluster: Adds new `config_server_management_mode` and `config_server_type` fields ([#2670](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2670)) + +## 1.21.1 (October 09, 2024) + +BUG FIXES: + +* resource/mongodbatlas_team: Fixes update logic of `usernames` attribute ensuring team is never emptied ([#2669](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2669)) + +## 1.21.0 (October 07, 2024) + NOTES: * data-source/mongodbatlas_global_cluster_config: Deprecates `custom_zone_mapping` in favor of `custom_zone_mapping_zone_id` ([#2637](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2637)) * resource/mongodbatlas_global_cluster_config: Deprecates `custom_zone_mapping` in
favor of `custom_zone_mapping_zone_id` ([#2637](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2637)) +FEATURES: + +* **New Data Source:** `data-source/mongodbatlas_resource_policies` ([#2598](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2598)) +* **New Data Source:** `data-source/mongodbatlas_resource_policy` ([#2598](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2598)) +* **New Resource:** `resource/mongodbatlas_resource_policy` ([#2585](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2585)) + ENHANCEMENTS: +* data-source/mongodbatlas_advanced_cluster: Supports `redact_client_log_data` attribute ([#2600](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2600)) +* data-source/mongodbatlas_advanced_clusters: Supports `redact_client_log_data` attribute ([#2600](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2600)) +* data-source/mongodbatlas_cluster: Supports `redact_client_log_data` attribute ([#2601](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2601)) +* data-source/mongodbatlas_clusters: Supports `redact_client_log_data` attribute ([#2601](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2601)) * data-source/mongodbatlas_global_cluster_config: Adds `custom_zone_mapping_zone_id` attribute ([#2637](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2637)) +* resource/mongodbatlas_advanced_cluster: Supports `redact_client_log_data` attribute ([#2600](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2600)) +* resource/mongodbatlas_cluster: Supports `redact_client_log_data` attribute ([#2601](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2601)) * resource/mongodbatlas_global_cluster_config: Adds `custom_zone_mapping_zone_id` attribute ([#2637](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2637)) BUG FIXES: diff --git a/GNUmakefile b/GNUmakefile index 5b523e1965..ba3690b6b6 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -8,7 +8,7 @@ endif ACCTEST_REGEX_RUN?=^TestAcc ACCTEST_TIMEOUT?=300m -PARALLEL_GO_TEST?=20 +PARALLEL_GO_TEST?=50 BINARY_NAME=terraform-provider-mongodbatlas DESTINATION=./bin/$(BINARY_NAME) @@ -18,7 +18,7 @@ GITTAG=$(shell git describe --always --tags) VERSION=$(GITTAG:v%=%) LINKER_FLAGS=-s -w -X 'github.com/mongodb/terraform-provider-mongodbatlas/version.ProviderVersion=${VERSION}' -GOLANGCI_VERSION=v1.60.3 # Also update golangci-lint GH action in code-health.yml when updating this version +GOLANGCI_VERSION=v1.61.0 # Also update golangci-lint GH action in code-health.yml when updating this version export PATH := $(shell go env GOPATH)/bin:$(PATH) export SHELL := env PATH=$(PATH) /bin/bash @@ -84,6 +84,7 @@ tools: ## Install dev tools go install github.com/hashicorp/terraform-plugin-codegen-framework/cmd/tfplugingen-framework@latest go install github.com/hashicorp/go-changelog/cmd/changelog-build@latest go install github.com/hashicorp/go-changelog/cmd/changelog-entry@latest + go install golang.org/x/tools/cmd/goimports@latest curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin $(GOLANGCI_VERSION) .PHONY: docs @@ -153,3 +154,24 @@ check-changelog-entry-file: .PHONY: jira-release-version jira-release-version: go run ./tools/jira-release-version/*.go + +.PHONY: enable-advancedclustertpf +enable-advancedclustertpf: + make delete-lines filename="./internal/provider/provider_sdk2.go" 
delete="mongodbatlas_advanced_cluster" + make add-lines filename=./internal/provider/provider.go find="project.Resource," add="advancedclustertpf.Resource," + make add-lines filename=./internal/provider/provider.go find="project.DataSource," add="advancedclustertpf.DataSource," + make add-lines filename=./internal/provider/provider.go find="project.PluralDataSource," add="advancedclustertpf.PluralDataSource," + +.PHONY: delete-lines ${filename} ${delete} +delete-lines: + rm -f file.tmp + grep -v "${delete}" "${filename}" > file.tmp + mv file.tmp ${filename} + goimports -w ${filename} + +.PHONY: add-lines ${filename} ${find} ${add} +add-lines: + rm -f file.tmp + sed 's/${find}/${find}${add}/' "${filename}" > "file.tmp" + mv file.tmp ${filename} + goimports -w ${filename} diff --git a/docs/data-sources/advanced_cluster.md b/docs/data-sources/advanced_cluster.md index f1beb03e9b..bd62827de3 100644 --- a/docs/data-sources/advanced_cluster.md +++ b/docs/data-sources/advanced_cluster.md @@ -104,6 +104,9 @@ In addition to all arguments above, the following attributes are exported: * `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details. * `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true). * `replica_set_scaling_strategy` - (Optional) Replica set scaling mode for your cluster. +* `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. +* `config_server_management_mode` - Config Server Management Mode for creating or updating a sharded cluster. Valid values are `ATLAS_MANAGED` (default) and `FIXED_TO_DEDICATED`. When configured as `ATLAS_MANAGED`, Atlas may automatically switch the cluster's config server type for optimal performance and savings. When configured as `FIXED_TO_DEDICATED`, the cluster will always use a dedicated config server. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). +* `config_server_type` Describes a sharded cluster's config server type. Valid values are `DEDICATED` and `EMBEDDED`. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). ### bi_connector_config @@ -123,8 +126,6 @@ To learn more, see [Resource Tags](https://dochub.mongodb.org/core/add-cluster-t ### labels -**WARNING:** This property is deprecated and will be removed in the future, use the `tags` attribute instead. - Key-value pairs that categorize the cluster. Each key and value has a maximum length of 255 characters. You cannot set the key `Infrastructure Tool`, it is used for internal purposes to track aggregate usage. * `key` - The key that you want to write. diff --git a/docs/data-sources/advanced_clusters.md b/docs/data-sources/advanced_clusters.md index 978541f8c3..98f85f42bd 100644 --- a/docs/data-sources/advanced_clusters.md +++ b/docs/data-sources/advanced_clusters.md @@ -106,6 +106,9 @@ In addition to all arguments above, the following attributes are exported: * `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details. 
* `global_cluster_self_managed_sharding` - Flag that indicates if cluster uses Atlas-Managed Sharding (false) or Self-Managed Sharding (true). * `replica_set_scaling_strategy` - (Optional) Replica set scaling mode for your cluster. +* `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. +* `config_server_management_mode` - Config Server Management Mode for creating or updating a sharded cluster. Valid values are `ATLAS_MANAGED` (default) and `FIXED_TO_DEDICATED`. When configured as `ATLAS_MANAGED`, Atlas may automatically switch the cluster's config server type for optimal performance and savings. When configured as `FIXED_TO_DEDICATED`, the cluster will always use a dedicated config server. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). +* `config_server_type` - Describes a sharded cluster's config server type. Valid values are `DEDICATED` and `EMBEDDED`. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). ### bi_connector_config @@ -125,8 +128,6 @@ To learn more, see [Resource Tags](https://dochub.mongodb.org/core/add-cluster-t ### labels -**WARNING:** This property is deprecated and will be removed in the future, use the `tags` attribute instead. - Key-value pairs that categorize the cluster. Each key and value has a maximum length of 255 characters. You cannot set the key `Infrastructure Tool`, it is used for internal purposes to track aggregate usage. * `key` - The key that you want to write. diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index 77d0d75be1..5dc6c71783 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -128,6 +128,8 @@ In addition to all arguments above, the following attributes are exported: * `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details. +* `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. + ### BI Connector Indicates BI Connector for Atlas configuration. @@ -166,8 +168,6 @@ To learn more, see [Resource Tags](https://dochub.mongodb.org/core/add-cluster-t ### Labels -**WARNING:** This property is deprecated and will be removed in the future, use the `tags` attribute instead. - Key-value pairs that categorize the cluster. Each key and value has a maximum length of 255 characters. You cannot set the key `Infrastructure Tool`, it is used for internal purposes to track aggregate usage. * `key` - The key that you want to write. diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index 5cfbce824a..8a3be69f86 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -119,6 +119,8 @@ In addition to all arguments above, the following attributes are exported: * `advanced_configuration` - Get the advanced configuration options. See [Advanced Configuration](#advanced-configuration) below for more details. +* `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info.
+ ### BI Connector Indicates BI Connector for Atlas configuration. @@ -157,8 +159,6 @@ To learn more, see [Resource Tags](https://dochub.mongodb.org/core/add-cluster-t ### Labels -**WARNING:** This property is deprecated and will be removed in the future, use the `tags` attribute instead. - Key-value pairs that categorize the cluster. Each key and value has a maximum length of 255 characters. You cannot set the key `Infrastructure Tool`, it is used for internal purposes to track aggregate usage. * `key` - The key that you want to write. diff --git a/docs/data-sources/resource_policies.md b/docs/data-sources/resource_policies.md new file mode 100644 index 0000000000..daf1d45560 --- /dev/null +++ b/docs/data-sources/resource_policies.md @@ -0,0 +1,142 @@ +# Data Source: mongodbatlas_resource_policies + +`mongodbatlas_resource_policies` returns all resource policies in an organization. + +-> **NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](https://github.com/mongodb/terraform-provider-mongodbatlas?tab=readme-ov-file#preview-features) when running `terraform` commands. + +## Example Usages +```terraform +resource "mongodbatlas_resource_policy" "project_ip_access_list" { + org_id = var.org_id + name = "forbid-access-from-anywhere" + + policies = [ + { + body = < policy.id } +} +``` + + +## Schema + +### Required + +- `org_id` (String) Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access. + +### Read-Only + +- `resource_policies` (Attributes List) (see [below for nested schema](#nestedatt--resource_policies)) + + +### Nested Schema for `resource_policies` + +Read-Only: + +- `created_by_user` (Attributes) The user that last updated the Atlas resource policy. (see [below for nested schema](#nestedatt--resource_policies--created_by_user)) +- `created_date` (String) Date and time in UTC when the Atlas resource policy was created. +- `id` (String) Unique 24-hexadecimal digit string that identifies an Atlas resource policy. +- `last_updated_by_user` (Attributes) The user that last updated the Atlas resource policy. (see [below for nested schema](#nestedatt--resource_policies--last_updated_by_user)) +- `last_updated_date` (String) Date and time in UTC when the Atlas resource policy was last updated. +- `name` (String) Human-readable label that describes the Atlas resource policy. +- `org_id` (String) Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access. +- `policies` (Attributes List) List of policies that make up the Atlas resource policy. (see [below for nested schema](#nestedatt--resource_policies--policies)) +- `version` (String) A string that identifies the version of the Atlas resource policy. + + +### Nested Schema for `resource_policies.created_by_user` + +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies a user. +- `name` (String) Human-readable label that describes a user. 
+ + + +### Nested Schema for `resource_policies.last_updated_by_user` + +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies a user. +- `name` (String) Human-readable label that describes a user. + + + +### Nested Schema for `resource_policies.policies` + +Read-Only: + +- `body` (String) A string that defines the permissions for the policy. The syntax used is the Cedar Policy language. +- `id` (String) Unique 24-hexadecimal character string that identifies the policy. + diff --git a/docs/data-sources/resource_policy.md b/docs/data-sources/resource_policy.md new file mode 100644 index 0000000000..1405e79239 --- /dev/null +++ b/docs/data-sources/resource_policy.md @@ -0,0 +1,134 @@ +# Data Source: mongodbatlas_resource_policy + +`mongodbatlas_resource_policy` describes a resource policy in an organization. + +-> **NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](https://github.com/mongodb/terraform-provider-mongodbatlas?tab=readme-ov-file#preview-features) when running `terraform` commands. + +## Example Usages +```terraform +resource "mongodbatlas_resource_policy" "project_ip_access_list" { + org_id = var.org_id + name = "forbid-access-from-anywhere" + + policies = [ + { + body = < policy.id } +} +``` + + +## Schema + +### Required + +- `id` (String) Unique 24-hexadecimal digit string that identifies an Atlas resource policy. +- `org_id` (String) Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access. + +### Read-Only + +- `created_by_user` (Attributes) The user that last updated the Atlas resource policy. (see [below for nested schema](#nestedatt--created_by_user)) +- `created_date` (String) Date and time in UTC when the Atlas resource policy was created. +- `last_updated_by_user` (Attributes) The user that last updated the Atlas resource policy. (see [below for nested schema](#nestedatt--last_updated_by_user)) +- `last_updated_date` (String) Date and time in UTC when the Atlas resource policy was last updated. +- `name` (String) Human-readable label that describes the Atlas resource policy. +- `policies` (Attributes List) List of policies that make up the Atlas resource policy. (see [below for nested schema](#nestedatt--policies)) +- `version` (String) A string that identifies the version of the Atlas resource policy. + + +### Nested Schema for `created_by_user` + +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies a user. +- `name` (String) Human-readable label that describes a user. + + + +### Nested Schema for `last_updated_by_user` + +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies a user. +- `name` (String) Human-readable label that describes a user. + + + +### Nested Schema for `policies` + +Read-Only: + +- `body` (String) A string that defines the permissions for the policy. The syntax used is the Cedar Policy language. +- `id` (String) Unique 24-hexadecimal character string that identifies the policy. 
+ diff --git a/docs/guides/1.21.0-upgrade-guide.md b/docs/guides/1.21.0-upgrade-guide.md new file mode 100644 index 0000000000..05f887c032 --- /dev/null +++ b/docs/guides/1.21.0-upgrade-guide.md @@ -0,0 +1,31 @@ +--- +page_title: "Upgrade Guide 1.21.0" +--- + +# MongoDB Atlas Provider 1.21.0: Upgrade and Information Guide + +The Terraform MongoDB Atlas Provider version 1.21.0 has a number of new and exciting features. + +## New Resources, Data Sources, and Features + +- You can now manage Resource Policies with the new `mongodbatlas_resource_policy` resource and corresponding data sources. The feature is available as a preview feature. To learn more, please review `mongodbatlas_resource_policy` [resource documentation](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/resource_policy). + +- `custom_zone_mapping_zone_id` attribute has been added to `mongodbatlas_global_cluster_config` resource and data source. To learn more, please review the resource [documentation](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/global_cluster_config#custom_zone_mapping_zone_id). + +- `redact_client_log_data` attribute has been added to `mongodbatlas_advanced_cluster`, `mongodbatlas_cluster` and corresponding data sources. + +## Deprecations and removals + +- `custom_zone_mapping` attribute has been deprecated in `mongodbatlas_global_cluster_config` resource and data source in favor of the new `custom_zone_mapping_zone_id` [attribute](https://registry.terraform.io/providers/mongodb/mongodbatlas/latest/docs/resources/global_cluster_config#custom_zone_mapping_zone_id). + +## Terraform MongoDB Atlas modules + +You can now leverage our [Terraform Modules](https://registry.terraform.io/namespaces/terraform-mongodbatlas-modules) to easily get started with MongoDB Atlas and critical features like [Push-based log export](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/push-based-log-export/mongodbatlas/latest), [Private Endpoints](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/private-endpoint/mongodbatlas/latest), etc. + +## Helpful Links + +* [Report bugs](https://github.com/mongodb/terraform-provider-mongodbatlas/issues) + +* [Request Features](https://feedback.mongodb.com/forums/924145-atlas?category_id=370723) + +* [Contact Support](https://docs.atlas.mongodb.com/support/) covered by MongoDB Atlas support plans, Developer and above. diff --git a/docs/index.md b/docs/index.md index 0245317400..9a756001c8 100644 --- a/docs/index.md +++ b/docs/index.md @@ -183,7 +183,6 @@ For more information on configuring and managing programmatic API Keys see the [ | 1.6.x | 2023-10-04 | 2025-10-31 | 2025-10-31 | | 1.5.x | 2023-06-12 | 2025-06-30 | 2025-06-30 | | 1.4.x | 2023-03-08 | 2025-03-31 | 2025-03-31 | -| 1.3.x | 2022-09-21 | 2024-09-30 | 2024-09-30 | For the safety of our users, we require only consuming versions of HashiCorp Terraform that are currently receiving Security / Maintenance Updates. For more details see [Support Period and End-of-Life (EOL) Policy](https://support.hashicorp.com/hc/en-us/articles/360021185113-Support-Period-and-End-of-Life-EOL-Policy). 
@@ -219,7 +218,7 @@ We ship binaries but do not prioritize fixes for the following operating system ## Examples from MongoDB and the Community -We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.20.0/examples) +We have [example configurations](https://github.com/mongodb/terraform-provider-mongodbatlas/tree/v1.21.1/examples) in our GitHub repo that will help both beginner and more advanced users. Have a good example you've created and want to share? @@ -227,4 +226,4 @@ Let us know the details via an [issue](https://github.com/mongodb/terraform-prov or submit a PR of your work to add it to the `examples` directory in our [GitHub repo](https://github.com/mongodb/terraform-provider-mongodbatlas/). ## Terraform MongoDB Atlas Modules -You can now leverage our [Terraform Modules](https://registry.terraform.io/namespaces/terraform-mongodbatlas-modules) to easily get started with MongoDB Atlas and critical features like [Push-based log export](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/push-based-log-export/mongodbatlas/latest), [Private Endpoints](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/private-endpoint/mongodbatlas/latest), etc. \ No newline at end of file +You can now leverage our [Terraform Modules](https://registry.terraform.io/namespaces/terraform-mongodbatlas-modules) to easily get started with MongoDB Atlas and critical features like [Push-based log export](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/push-based-log-export/mongodbatlas/latest), [Private Endpoints](https://registry.terraform.io/modules/terraform-mongodbatlas-modules/private-endpoint/mongodbatlas/latest), etc. diff --git a/docs/resources/advanced_cluster.md b/docs/resources/advanced_cluster.md index e73bf9ae72..0166a8229a 100644 --- a/docs/resources/advanced_cluster.md +++ b/docs/resources/advanced_cluster.md @@ -399,6 +399,8 @@ This parameter defaults to false. * `accept_data_risks_and_force_replica_set_reconfig` - (Optional) If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set `accept_data_risks_and_force_replica_set_reconfig` to the current date. Learn more about Reconfiguring a Replica Set during a regional outage [here](https://dochub.mongodb.org/core/regional-outage-reconfigure-replica-set). * `global_cluster_self_managed_sharding` - (Optional) Flag that indicates if cluster uses Atlas-Managed Sharding (false, default) or Self-Managed Sharding (true). It can only be enabled for Global Clusters (`GEOSHARDED`). It cannot be changed once the cluster is created. Use this mode if you're an advanced user and the default configuration is too restrictive for your workload. If you select this option, you must manually configure the sharding strategy, more info [here](https://www.mongodb.com/docs/atlas/tutorial/create-global-cluster/#select-your-sharding-configuration). * `replica_set_scaling_strategy` - (Optional) Replica set scaling mode for your cluster. Valid values are `WORKLOAD_TYPE`, `SEQUENTIAL` and `NODE_TYPE`. By default, Atlas scales under `WORKLOAD_TYPE`. 
This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes. When configured as `SEQUENTIAL`, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads. When configured as `NODE_TYPE`, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads. [Modify the Replica Set Scaling Mode](https://dochub.mongodb.org/core/scale-nodes) +* `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. Use this in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements. **Note**: Changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated. +* `config_server_management_mode` - (Optional) Config Server Management Mode for creating or updating a sharded cluster. Valid values are `ATLAS_MANAGED` (default) and `FIXED_TO_DEDICATED`. When configured as `ATLAS_MANAGED`, Atlas may automatically switch the cluster's config server type for optimal performance and savings. When configured as `FIXED_TO_DEDICATED`, the cluster will always use a dedicated config server. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). ### bi_connector_config @@ -482,8 +484,6 @@ To learn more, see [Resource Tags](https://dochub.mongodb.org/core/add-cluster-t ### labels -**WARNING:** This property is deprecated and will be removed in the future, use the `tags` attribute instead. - ```terraform labels { key = "Key 1" @@ -593,7 +593,7 @@ If you are upgrading a replica set to a sharded cluster, you cannot increase the ### auto_scaling -* `disk_gb_enabled` - (Optional) Flag that indicates whether this cluster enables disk auto-scaling. This parameter defaults to true. +* `disk_gb_enabled` - (Optional) Flag that indicates whether this cluster enables disk auto-scaling. This parameter defaults to false. - Set to `true` to enable disk auto-scaling. - Set to `false` to disable disk auto-scaling. @@ -633,7 +633,7 @@ After adding the `lifecycle` block to explicitly change `instance_size` comment ### analytics_auto_scaling -* `disk_gb_enabled` - (Optional) Flag that indicates whether this cluster enables disk auto-scaling. This parameter defaults to true. +* `disk_gb_enabled` - (Optional) Flag that indicates whether this cluster enables disk auto-scaling. This parameter defaults to false. * `compute_enabled` - (Optional) Flag that indicates whether instance size auto-scaling is enabled. This parameter defaults to false. ~> **IMPORTANT:** If `compute_enabled` is true, then Atlas will automatically scale up to the maximum provided and down to the minimum, if provided. @@ -683,6 +683,8 @@ In addition to all arguments above, the following attributes are exported: - DELETED - REPAIRING * `replication_specs.#.container_id` - A key-value map of the Network Peering Container ID(s) for the configuration specified in `region_configs`. 
The Container ID is the id of the container created when the first cluster in the region (AWS/Azure) or project (GCP) was created. The syntax is `"providerName:regionName" = "containerId"`. Example `AWS:US_EAST_1" = "61e0797dde08fb498ca11a71`. +* `config_server_type` - Describes a sharded cluster's config server type. Valid values are `DEDICATED` and `EMBEDDED`. To learn more, see the [Sharded Cluster Config Servers documentation](https://dochub.mongodb.org/docs/manual/core/sharded-cluster-config-servers/). + + ## Import diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index b9b48bc4bf..d102bae353 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -281,7 +281,7 @@ Refer to the following for full privatelink endpoint connection string examples: * `name` - (Required) Name of the cluster as it appears in Atlas. Once the cluster is created, its name cannot be changed. **WARNING** Changing the name will result in destruction of the existing cluster and the creation of a new cluster. * `provider_instance_size_name` - (Required) Atlas provides different instance sizes, each with a default storage capacity and RAM size. The instance size you select is used for all the data-bearing servers in your cluster. See [Create a Cluster](https://docs.atlas.mongodb.com/reference/api/clusters-create-one/) `providerSettings.instanceSizeName` for valid values and default resources. -* `auto_scaling_disk_gb_enabled` - (Optional) Specifies whether disk auto-scaling is enabled. The default is true. +* `auto_scaling_disk_gb_enabled` - (Optional) Specifies whether disk auto-scaling is enabled. The default is false. - Set to `true` to enable disk auto-scaling. - Set to `false` to disable disk auto-scaling. @@ -387,6 +387,7 @@ But in order to explicitly change `provider_instance_size_name` comment the `lif - `LTS`: Atlas creates your cluster using the latest patch release of the MongoDB version that you specify in the mongoDBMajorVersion field. Atlas automatically updates your cluster to subsequent patch releases of this MongoDB version. Atlas doesn't update your cluster to newer rapid or major MongoDB releases as they become available. * `timeouts`- (Optional) The duration of time to wait for Cluster to be created, updated, or deleted. The timeout value is defined by a signed sequence of decimal numbers with an time unit suffix such as: `1h45m`, `300s`, `10m`, .... The valid time units are: `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. The default timeout for Cluster create & delete is `3h`. Learn more about timeouts [here](https://www.terraform.io/plugin/sdkv2/resources/retries-and-customizable-timeouts). * `accept_data_risks_and_force_replica_set_reconfig`- (Optional) If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set `accept_data_risks_and_force_replica_set_reconfig` to the current date. Learn more about Reconfiguring a Replica Set during a regional outage [here](https://dochub.mongodb.org/core/regional-outage-reconfigure-replica-set).
+* `redact_client_log_data` - (Optional) Flag that enables or disables log redaction, see the [manual](https://www.mongodb.com/docs/manual/administration/monitoring/#log-redaction) for more info. Use this in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements. **Note**: Changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated. The log redaction field is updated via an Atlas API call after cluster creation. Consequently, there may be a brief period during resource creation when log redaction is not yet enabled. To ensure complete log redaction from the outset, use `mongodbatlas_advanced_cluster`. ### Multi-Region Cluster @@ -521,8 +522,6 @@ To learn more, see [Resource Tags](https://dochub.mongodb.org/core/add-cluster-t ### Labels -**WARNING:** This property is deprecated and will be removed in the future, use the `tags` attribute instead. - ```terraform labels { key = "Key 1" diff --git a/docs/resources/resource_policy.md b/docs/resources/resource_policy.md new file mode 100644 index 0000000000..60063f6956 --- /dev/null +++ b/docs/resources/resource_policy.md @@ -0,0 +1,145 @@ +# Resource: mongodbatlas_resource_policy + +`mongodbatlas_resource_policy` provides a Resource Policy resource. The resource lets you create, edit and delete resource policies to prevent misconfigurations and reduce the need for corrective interventions in your organization. + +-> **NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](https://github.com/mongodb/terraform-provider-mongodbatlas?tab=readme-ov-file#preview-features) when running `terraform` commands. + + +## Example Usages + +```terraform +resource "mongodbatlas_resource_policy" "project_ip_access_list" { + org_id = var.org_id + name = "forbid-access-from-anywhere" + + policies = [ + { + body = < policy.id } +} +``` + + +## Schema + +### Required + +- `name` (String) Human-readable label that describes the Atlas resource policy. +- `org_id` (String) Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access. +- `policies` (Attributes List) List of policies that make up the Atlas resource policy. (see [below for nested schema](#nestedatt--policies)) + +### Read-Only + +- `created_by_user` (Attributes) The user that last updated the Atlas resource policy. (see [below for nested schema](#nestedatt--created_by_user)) +- `created_date` (String) Date and time in UTC when the Atlas resource policy was created. +- `id` (String) Unique 24-hexadecimal digit string that identifies an Atlas resource policy. +- `last_updated_by_user` (Attributes) The user that last updated the Atlas resource policy. (see [below for nested schema](#nestedatt--last_updated_by_user)) +- `last_updated_date` (String) Date and time in UTC when the Atlas resource policy was last updated. +- `version` (String) A string that identifies the version of the Atlas resource policy. + + +### Nested Schema for `policies` + +Required: + +- `body` (String) A string that defines the permissions for the policy. The syntax used is the Cedar Policy language. 
+ +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies the policy. + + + +### Nested Schema for `created_by_user` + +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies a user. +- `name` (String) Human-readable label that describes a user. + + + +### Nested Schema for `last_updated_by_user` + +Read-Only: + +- `id` (String) Unique 24-hexadecimal character string that identifies a user. +- `name` (String) Human-readable label that describes a user. + +# Import +Resource Policy resource can be imported using the org ID and policy ID, in the format `{ORG_ID}-{POLICY_ID}`, e.g. + +``` +terraform import mongodbatlas_resource_policy.cloud_region 65def6ce0f722a1507105aa5-66f1c018dba9c04e7dcfaf36 +``` diff --git a/examples/mongodbatlas_resource_policy/README.md b/examples/mongodbatlas_resource_policy/README.md new file mode 100644 index 0000000000..ee34dfbe63 --- /dev/null +++ b/examples/mongodbatlas_resource_policy/README.md @@ -0,0 +1,12 @@ +# MongoDB Atlas Provider -- Atlas Resource Policy +This example creates three different resource policies in an organization. + +**NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](../../README.md#preview-features) when running `terraform` commands. + + +Variables Required to be set: +- `public_key`: Atlas public key +- `private_key`: Atlas private key +- `org_id`: Organization ID where project will be created diff --git a/examples/mongodbatlas_resource_policy/cloud-provider.cedar b/examples/mongodbatlas_resource_policy/cloud-provider.cedar new file mode 100644 index 0000000000..69bdf92dd1 --- /dev/null +++ b/examples/mongodbatlas_resource_policy/cloud-provider.cedar @@ -0,0 +1,7 @@ +forbid ( + principal, + action == cloud::Action::"cluster.createEdit", + resource +) +when +{ context.cluster.cloudProviders.containsAny([cloud::cloudProvider::"${CLOUD_PROVIDER}"]) }; \ No newline at end of file diff --git a/examples/mongodbatlas_resource_policy/main.tf b/examples/mongodbatlas_resource_policy/main.tf new file mode 100644 index 0000000000..e0bb93ab8c --- /dev/null +++ b/examples/mongodbatlas_resource_policy/main.tf @@ -0,0 +1,78 @@ +resource "mongodbatlas_resource_policy" "project_ip_access_list" { + org_id = var.org_id + name = "forbid-access-from-anywhere" + + policies = [ + { + body = < policy.id } +} diff --git a/examples/mongodbatlas_resource_policy/provider.tf b/examples/mongodbatlas_resource_policy/provider.tf new file mode 100644 index 0000000000..e5aeda8033 --- /dev/null +++ b/examples/mongodbatlas_resource_policy/provider.tf @@ -0,0 +1,4 @@ +provider "mongodbatlas" { + public_key = var.public_key + private_key = var.private_key +} \ No newline at end of file diff --git a/examples/mongodbatlas_resource_policy/variables.tf b/examples/mongodbatlas_resource_policy/variables.tf new file mode 100644 index 0000000000..503476e252 --- /dev/null +++ b/examples/mongodbatlas_resource_policy/variables.tf @@ -0,0 +1,12 @@ +variable "public_key" { + description = "Public API key to authenticate to Atlas" + type = string +} +variable "private_key" { + description = "Private API key to authenticate to Atlas" + type = string +} +variable "org_id" { + description = "Atlas Organization ID" + type = string +} \ No newline at end of file diff --git 
a/examples/mongodbatlas_resource_policy/versions.tf b/examples/mongodbatlas_resource_policy/versions.tf new file mode 100644 index 0000000000..3a10272928 --- /dev/null +++ b/examples/mongodbatlas_resource_policy/versions.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + cedar = { + source = "common-fate/cedar" + version = "0.2.0" + } + mongodbatlas = { + source = "mongodb/mongodbatlas" + version = "~> 1.20" + } + } + required_version = ">= 1.0" +} diff --git a/go.mod b/go.mod index 514876e7a4..17bcc6b10b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/mongodb/terraform-provider-mongodbatlas -go 1.23 +go 1.23.2 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 @@ -24,7 +24,7 @@ require ( github.com/zclconf/go-cty v1.15.0 go.mongodb.org/atlas v0.37.0 go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 - go.mongodb.org/atlas-sdk/v20240805004 v20240805004.2.0 + go.mongodb.org/atlas-sdk/v20240805004 v20240805004.7.0 go.mongodb.org/realm v0.1.0 ) diff --git a/go.sum b/go.sum index 09ba56ee59..be9c337356 100644 --- a/go.sum +++ b/go.sum @@ -780,8 +780,8 @@ go.mongodb.org/atlas v0.37.0 h1:zQnO1o5+bVP9IotpAYpres4UjMD2F4nwNEFTZhNL4ck= go.mongodb.org/atlas v0.37.0/go.mod h1:DJYtM+vsEpPEMSkQzJnFHrT0sP7ev6cseZc/GGjJYG8= go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 h1:d/gbYJ+obR0EM/3DZf7+ZMi2QWISegm3mid7Or708cc= go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0/go.mod h1:O47ZrMMfcWb31wznNIq2PQkkdoFoK0ea2GlmRqGJC2s= -go.mongodb.org/atlas-sdk/v20240805004 v20240805004.2.0 h1:f40gW44GYiaF29nSJd0kU+SzRfrLlgU/8PmMIQdO+zc= -go.mongodb.org/atlas-sdk/v20240805004 v20240805004.2.0/go.mod h1:64cvOvwsE0TTi9nAl16XlVkHzT0gFLGcZeG7HnnXxtE= +go.mongodb.org/atlas-sdk/v20240805004 v20240805004.7.0 h1:v899BXuHGSWZAPXtckDSG2ih2Xop87JhJxQeG7X4gFw= +go.mongodb.org/atlas-sdk/v20240805004 v20240805004.7.0/go.mod h1:7KSDRNJm1Dxc+ggk2MOggp2/4zUpM4aRcP12evvsrQ4= go.mongodb.org/realm v0.1.0 h1:zJiXyLaZrznQ+Pz947ziSrDKUep39DO4SfA0Fzx8M4M= go.mongodb.org/realm v0.1.0/go.mod h1:4Vj6iy+Puo1TDERcoh4XZ+pjtwbOzPpzqy3Cwe8ZmDM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= diff --git a/internal/common/conversion/error_framework.go b/internal/common/conversion/error_framework.go new file mode 100644 index 0000000000..5586456cdf --- /dev/null +++ b/internal/common/conversion/error_framework.go @@ -0,0 +1,34 @@ +package conversion + +import ( + "encoding/json" + + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type ErrBody interface { + Body() []byte +} + +// AddJSONBodyErrorToDiagnostics tries to get the JSON body from the error and add it to the diagnostics. +// For example, admin.GenericOpenAPIError has the Body() []byte method. 
+func AddJSONBodyErrorToDiagnostics(msgPrefix string, err error, diags *diag.Diagnostics) { + errGeneric, ok := err.(ErrBody) + if !ok { + diags.AddError(msgPrefix, err.Error()) + return + } + var respJSON map[string]any + errMarshall := json.Unmarshal(errGeneric.Body(), &respJSON) + if errMarshall != nil { + diags.AddError(msgPrefix, err.Error()) + return + } + errorBytes, errMarshall := json.MarshalIndent(respJSON, "", " ") + if errMarshall != nil { + diags.AddError(msgPrefix, err.Error()) + return + } + errorJSON := string(errorBytes) + diags.AddError(msgPrefix, errorJSON) +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 32820010e3..e432f4737d 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -37,6 +37,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/projectipaccesslist" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/projectipaddresses" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/pushbasedlogexport" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/resourcepolicy" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/searchdeployment" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streamconnection" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/streaminstance" @@ -445,8 +446,10 @@ func (p *MongodbtlasProvider) DataSources(context.Context) []func() datasource.D encryptionatrestprivateendpoint.PluralDataSource, mongodbemployeeaccessgrant.DataSource, } - previewDataSources := []func() datasource.DataSource{} // Data sources not yet in GA - + previewDataSources := []func() datasource.DataSource{ + resourcepolicy.DataSource, + resourcepolicy.PluralDataSource, + } // Data sources not yet in GA if providerEnablePreview { dataSources = append(dataSources, previewDataSources...) } @@ -468,8 +471,9 @@ func (p *MongodbtlasProvider) Resources(context.Context) []func() resource.Resou encryptionatrestprivateendpoint.Resource, mongodbemployeeaccessgrant.Resource, } - previewResources := []func() resource.Resource{} // Resources not yet in GA - + previewResources := []func() resource.Resource{ + resourcepolicy.Resource, + } // Resources not yet in GA if providerEnablePreview { resources = append(resources, previewResources...) } diff --git a/internal/service/advancedcluster/README.md b/internal/service/advancedcluster/README.md new file mode 100644 index 0000000000..db920d49a8 --- /dev/null +++ b/internal/service/advancedcluster/README.md @@ -0,0 +1,9 @@ +# advancedcluster package + +This package contains current exposed implementation of `mongodbatlas_advanced_cluster`. + +**Note:** This file and complete package will be deleted once the TPF update in `advancedclustertpf` is ready. 
+ +If you change something in this package, please: +- Do the same change in `advancedclustertpf` package +- Or if you can't, add a note to [`advancedclustertpf` README.md file](../advancedclustertpf/README.md#changes-in-advancedcluster-that-needs-to-be-added-here) diff --git a/internal/service/advancedcluster/data_source_advanced_cluster.go b/internal/service/advancedcluster/data_source_advanced_cluster.go index 3796b7209b..40e58fee9d 100644 --- a/internal/service/advancedcluster/data_source_advanced_cluster.go +++ b/internal/service/advancedcluster/data_source_advanced_cluster.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -68,9 +67,8 @@ func DataSource() *schema.Resource { Computed: true, }, "labels": { - Type: schema.TypeSet, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), + Type: schema.TypeSet, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -250,6 +248,18 @@ func DataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "redact_client_log_data": { + Type: schema.TypeBool, + Computed: true, + }, + "config_server_management_mode": { + Type: schema.TypeString, + Computed: true, + }, + "config_server_type": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -294,6 +304,9 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. if err := d.Set("replica_set_scaling_strategy", clusterDescNew.GetReplicaSetScalingStrategy()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) } + if err := d.Set("redact_client_log_data", clusterDescNew.GetRedactClientLogData()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) + } zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(clusterDescNew) if err != nil { @@ -305,7 +318,10 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err)) } - diags := setRootFields(d, convertClusterDescToLatestExcludeRepSpecs(clusterDescOld), false) + clusterDesc := convertClusterDescToLatestExcludeRepSpecs(clusterDescOld) + clusterDesc.ConfigServerManagementMode = clusterDescNew.ConfigServerManagementMode + clusterDesc.ConfigServerType = clusterDescNew.ConfigServerType + diags := setRootFields(d, clusterDesc, false) if diags.HasError() { return diags } @@ -327,6 +343,9 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag. 
if err := d.Set("replica_set_scaling_strategy", clusterDescLatest.GetReplicaSetScalingStrategy()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) } + if err := d.Set("redact_client_log_data", clusterDescLatest.GetRedactClientLogData()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) + } zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) if err != nil { diff --git a/internal/service/advancedcluster/data_source_advanced_clusters.go b/internal/service/advancedcluster/data_source_advanced_clusters.go index 947c6073d8..d9c52e70be 100644 --- a/internal/service/advancedcluster/data_source_advanced_clusters.go +++ b/internal/service/advancedcluster/data_source_advanced_clusters.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) @@ -81,9 +80,8 @@ func PluralDataSource() *schema.Resource { Computed: true, }, "labels": { - Type: schema.TypeSet, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), + Type: schema.TypeSet, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -263,6 +261,18 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "redact_client_log_data": { + Type: schema.TypeBool, + Computed: true, + }, + "config_server_management_mode": { + Type: schema.TypeString, + Computed: true, + }, + "config_server_type": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -362,6 +372,9 @@ func flattenAdvancedClusters(ctx context.Context, connV220240530 *admin20240530. 
"version_release_system": cluster.GetVersionReleaseSystem(), "global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(), "replica_set_scaling_strategy": cluster.GetReplicaSetScalingStrategy(), + "redact_client_log_data": cluster.GetRedactClientLogData(), + "config_server_management_mode": cluster.GetConfigServerManagementMode(), + "config_server_type": cluster.GetConfigServerType(), } results = append(results, result) } @@ -418,6 +431,9 @@ func flattenAdvancedClustersOldSDK(ctx context.Context, connV20240530 *admin2024 "version_release_system": cluster.GetVersionReleaseSystem(), "global_cluster_self_managed_sharding": cluster.GetGlobalClusterSelfManagedSharding(), "replica_set_scaling_strategy": clusterDescNew.GetReplicaSetScalingStrategy(), + "redact_client_log_data": clusterDescNew.GetRedactClientLogData(), + "config_server_management_mode": clusterDescNew.GetConfigServerManagementMode(), + "config_server_type": clusterDescNew.GetConfigServerType(), } results = append(results, result) } diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go index 6196cea243..4cbc853a57 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster.go +++ b/internal/service/advancedcluster/resource_advanced_cluster.go @@ -123,10 +123,9 @@ func Resource() *schema.Resource { Computed: true, }, "labels": { - Type: schema.TypeSet, - Optional: true, - Set: HashFunctionForKeyValuePair, - Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), + Type: schema.TypeSet, + Optional: true, + Set: HashFunctionForKeyValuePair, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -341,6 +340,20 @@ func Resource() *schema.Resource { Optional: true, Computed: true, }, + "redact_client_log_data": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "config_server_management_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "config_server_type": { + Type: schema.TypeString, + Computed: true, + }, }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(3 * time.Hour), @@ -450,6 +463,12 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if v, ok := d.GetOk("replica_set_scaling_strategy"); ok { params.ReplicaSetScalingStrategy = conversion.StringPtr(v.(string)) } + if v, ok := d.GetOk("redact_client_log_data"); ok { + params.RedactClientLogData = conversion.Pointer(v.(bool)) + } + if v, ok := d.GetOk("config_server_management_mode"); ok { + params.ConfigServerManagementMode = conversion.StringPtr(v.(string)) + } // Validate oplog_size_mb to show the error before the cluster is created. 
if oplogSizeMB, ok := d.GetOkExists("advanced_configuration.0.oplog_size_mb"); ok { @@ -553,7 +572,9 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di if err := d.Set("replica_set_scaling_strategy", cluster.GetReplicaSetScalingStrategy()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) } - + if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) + } zoneNameToZoneIDs, err := getZoneIDsFromNewAPI(cluster) if err != nil { return diag.FromErr(err) @@ -565,6 +586,8 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } clusterResp = convertClusterDescToLatestExcludeRepSpecs(clusterOldSDK) + clusterResp.ConfigServerManagementMode = cluster.ConfigServerManagementMode + clusterResp.ConfigServerType = cluster.ConfigServerType } else { cluster, resp, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() if err != nil { @@ -582,6 +605,9 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di if err := d.Set("replica_set_scaling_strategy", cluster.GetReplicaSetScalingStrategy()); err != nil { return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replica_set_scaling_strategy", clusterName, err)) } + if err := d.Set("redact_client_log_data", cluster.GetRedactClientLogData()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "redact_client_log_data", clusterName, err)) + } zoneNameToOldReplicationSpecIDs, err := getReplicationSpecIDsFromOldAPI(ctx, projectID, clusterName, connV220240530) if err != nil { @@ -735,6 +761,13 @@ func setRootFields(d *schema.ResourceData, cluster *admin.ClusterDescription2024 return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "global_cluster_self_managed_sharding", clusterName, err)) } + if err := d.Set("config_server_type", cluster.GetConfigServerType()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_type", clusterName, err)) + } + + if err := d.Set("config_server_management_mode", cluster.GetConfigServerManagementMode()); err != nil { + return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "config_server_management_mode", clusterName, err)) + } return nil } @@ -801,6 +834,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. return diags } clusterChangeDetect := new(admin20240530.AdvancedClusterDescription) + var waitOnUpdate bool if !reflect.DeepEqual(req, clusterChangeDetect) { if err := CheckRegionConfigsPriorityOrderOld(req.GetReplicationSpecs()); err != nil { return diag.FromErr(err) @@ -808,16 +842,25 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
if _, _, err := connV220240530.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } - if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { - return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) + waitOnUpdate = true + } + if d.HasChange("replica_set_scaling_strategy") || d.HasChange("redact_client_log_data") || d.HasChange("config_server_management_mode") { + request := new(admin.ClusterDescription20240805) + if d.HasChange("replica_set_scaling_strategy") { + request.ReplicaSetScalingStrategy = conversion.Pointer(d.Get("replica_set_scaling_strategy").(string)) } - } else if d.HasChange("replica_set_scaling_strategy") { - request := &admin.ClusterDescription20240805{ - ReplicaSetScalingStrategy: conversion.Pointer(d.Get("replica_set_scaling_strategy").(string)), + if d.HasChange("redact_client_log_data") { + request.RedactClientLogData = conversion.Pointer(d.Get("redact_client_log_data").(bool)) + } + if d.HasChange("config_server_management_mode") { + request.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) } if _, _, err := connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, request).Execute(); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } + waitOnUpdate = true + } + if waitOnUpdate { if err := waitForUpdateToFinish(ctx, connV2, projectID, clusterName, timeout); err != nil { return diag.FromErr(fmt.Errorf(errorUpdate, clusterName, err)) } @@ -972,6 +1015,13 @@ func updateRequest(ctx context.Context, d *schema.ResourceData, projectID, clust if d.HasChange("replica_set_scaling_strategy") { cluster.ReplicaSetScalingStrategy = conversion.Pointer(d.Get("replica_set_scaling_strategy").(string)) } + if d.HasChange("redact_client_log_data") { + cluster.RedactClientLogData = conversion.Pointer(d.Get("redact_client_log_data").(bool)) + } + if d.HasChange("config_server_management_mode") { + cluster.ConfigServerManagementMode = conversion.StringPtr(d.Get("config_server_management_mode").(string)) + } + return cluster, nil } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go b/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go index 77e15b3c4c..ad47313150 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_migration_test.go @@ -47,7 +47,7 @@ func TestMigAdvancedCluster_replicaSetAWSProviderUpdate(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { mig.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { @@ -192,7 +192,7 @@ func TestMigAdvancedCluster_partialAdvancedConf(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 92181c1685..40904c5f41 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -24,6 +24,11 @@ const ( dataSourcePluralName = "data.mongodbatlas_advanced_clusters.test" ) 
+var ( + configServerManagementModeFixedToDedicated = "FIXED_TO_DEDICATED" + configServerManagementModeAtlasManaged = "ATLAS_MANAGED" +) + func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) { var ( projectID = acc.ProjectIDExecution(t) @@ -31,7 +36,7 @@ func TestAccClusterAdvancedCluster_basicTenant(t *testing.T) { clusterNameUpdated = acc.RandomClusterName() ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -64,7 +69,7 @@ func replicaSetAWSProviderTestCase(t *testing.T) resource.TestCase { ) return resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -142,12 +147,12 @@ func singleShardedMultiCloudTestCase(t *testing.T) resource.TestCase { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10"), - Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 1, "M10", nil), + Check: checkShardedOldSchemaMultiCloud(clusterName, 1, "M10", true, nil), }, { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10"), - Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterNameUpdated, 1, "M10", nil), + Check: checkShardedOldSchemaMultiCloud(clusterNameUpdated, 1, "M10", true, nil), }, { ResourceName: resourceName, @@ -169,7 +174,7 @@ func TestAccClusterAdvancedCluster_unpausedToPaused(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -204,7 +209,7 @@ func TestAccClusterAdvancedCluster_pausedToUnpaused(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -266,7 +271,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -301,7 +306,7 @@ func TestAccClusterAdvancedCluster_advancedConfig_MongoDBVersion5(t *testing.T) ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -342,7 +347,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ 
-374,7 +379,7 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAutoScaling(t *testing. ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -417,7 +422,7 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAnalyticsAutoScaling(t ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -530,7 +535,7 @@ func TestAccClusterAdvancedClusterConfig_selfManagedShardingIncorrectType(t *tes ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -555,12 +560,12 @@ func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchema(t *testing.T) CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10"), - Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M10", &configServerManagementModeFixedToDedicated), + Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M10", false, &configServerManagementModeFixedToDedicated), }, { - Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20"), - Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false), + Config: configShardedOldSchemaMultiCloud(orgID, projectName, clusterName, 2, "M20", &configServerManagementModeAtlasManaged), + Check: checkShardedOldSchemaMultiCloud(clusterName, 2, "M20", false, &configServerManagementModeAtlasManaged), }, }, }) @@ -748,7 +753,7 @@ func TestAccClusterAdvancedClusterConfig_geoShardedTransitionFromOldToNewSchema( }) } -func TestAccAdvancedCluster_replicaSetScalingStrategy(t *testing.T) { +func TestAccAdvancedCluster_replicaSetScalingStrategyAndRedactClientLogData(t *testing.T) { var ( orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() @@ -761,22 +766,26 @@ func TestAccAdvancedCluster_replicaSetScalingStrategy(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configReplicaSetScalingStrategy(orgID, projectName, clusterName, "WORKLOAD_TYPE"), - Check: checkReplicaSetScalingStrategy("WORKLOAD_TYPE"), + Config: configReplicaSetScalingStrategyAndRedactClientLogData(orgID, projectName, clusterName, "WORKLOAD_TYPE", true), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("WORKLOAD_TYPE", true), + }, + { + Config: configReplicaSetScalingStrategyAndRedactClientLogData(orgID, projectName, clusterName, "SEQUENTIAL", false), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("SEQUENTIAL", false), }, { - Config: configReplicaSetScalingStrategy(orgID, projectName, clusterName, "SEQUENTIAL"), - Check: checkReplicaSetScalingStrategy("SEQUENTIAL"), + Config: configReplicaSetScalingStrategyAndRedactClientLogData(orgID, projectName, clusterName, "NODE_TYPE", true), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("NODE_TYPE", true), }, { - Config: configReplicaSetScalingStrategy(orgID, 
projectName, clusterName, "NODE_TYPE"), - Check: checkReplicaSetScalingStrategy("NODE_TYPE"), + Config: configReplicaSetScalingStrategyAndRedactClientLogData(orgID, projectName, clusterName, "NODE_TYPE", false), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("NODE_TYPE", false), }, }, }) } -func TestAccAdvancedCluster_replicaSetScalingStrategyOldSchema(t *testing.T) { +func TestAccAdvancedCluster_replicaSetScalingStrategyAndRedactClientLogDataOldSchema(t *testing.T) { var ( orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() @@ -789,16 +798,16 @@ func TestAccAdvancedCluster_replicaSetScalingStrategyOldSchema(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configReplicaSetScalingStrategyOldSchema(orgID, projectName, clusterName, "WORKLOAD_TYPE"), - Check: checkReplicaSetScalingStrategy("WORKLOAD_TYPE"), + Config: configReplicaSetScalingStrategyAndRedactClientLogDataOldSchema(orgID, projectName, clusterName, "WORKLOAD_TYPE", false), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("WORKLOAD_TYPE", false), }, { - Config: configReplicaSetScalingStrategyOldSchema(orgID, projectName, clusterName, "SEQUENTIAL"), - Check: checkReplicaSetScalingStrategy("SEQUENTIAL"), + Config: configReplicaSetScalingStrategyAndRedactClientLogDataOldSchema(orgID, projectName, clusterName, "SEQUENTIAL", true), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("SEQUENTIAL", true), }, { - Config: configReplicaSetScalingStrategyOldSchema(orgID, projectName, clusterName, "NODE_TYPE"), - Check: checkReplicaSetScalingStrategy("NODE_TYPE"), + Config: configReplicaSetScalingStrategyAndRedactClientLogDataOldSchema(orgID, projectName, clusterName, "NODE_TYPE", false), + Check: checkReplicaSetScalingStrategyAndRedactClientLogData("NODE_TYPE", false), }, }, }) @@ -807,7 +816,8 @@ func TestAccAdvancedCluster_replicaSetScalingStrategyOldSchema(t *testing.T) { // TestAccClusterAdvancedCluster_priorityOldSchema will be able to be simplied or deleted in CLOUDP-275825 func TestAccClusterAdvancedCluster_priorityOldSchema(t *testing.T) { var ( - projectID = acc.ProjectIDExecution(t) + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region clusterName = acc.RandomClusterName() ) @@ -817,15 +827,15 @@ func TestAccClusterAdvancedCluster_priorityOldSchema(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configPriority(projectID, clusterName, true, true), + Config: configPriority(orgID, projectName, clusterName, true, true), ExpectError: regexp.MustCompile("priority values in region_configs must be in descending order"), }, { - Config: configPriority(projectID, clusterName, true, false), + Config: configPriority(orgID, projectName, clusterName, true, false), Check: resource.TestCheckResourceAttr(resourceName, "replication_specs.0.region_configs.#", "2"), }, { - Config: configPriority(projectID, clusterName, true, true), + Config: configPriority(orgID, projectName, clusterName, true, true), ExpectError: regexp.MustCompile("priority values in region_configs must be in descending order"), }, }, @@ -835,7 +845,8 @@ func TestAccClusterAdvancedCluster_priorityOldSchema(t *testing.T) { // TestAccClusterAdvancedCluster_priorityNewSchema will be able to be simplied or deleted in CLOUDP-275825 func TestAccClusterAdvancedCluster_priorityNewSchema(t *testing.T) { var ( - projectID = 
acc.ProjectIDExecution(t) + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution to avoid cross-region limits because multi-region clusterName = acc.RandomClusterName() ) @@ -845,15 +856,15 @@ func TestAccClusterAdvancedCluster_priorityNewSchema(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configPriority(projectID, clusterName, false, true), + Config: configPriority(orgID, projectName, clusterName, false, true), ExpectError: regexp.MustCompile("priority values in region_configs must be in descending order"), }, { - Config: configPriority(projectID, clusterName, false, false), + Config: configPriority(orgID, projectName, clusterName, false, false), Check: resource.TestCheckResourceAttr(resourceName, "replication_specs.0.region_configs.#", "2"), }, { - Config: configPriority(projectID, clusterName, false, true), + Config: configPriority(orgID, projectName, clusterName, false, true), ExpectError: regexp.MustCompile("priority values in region_configs must be in descending order"), }, }, @@ -1175,7 +1186,17 @@ func checkReplicaSetMultiCloud(name string, regionConfigs int) resource.TestChec ) } -func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string) string { +func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards int, analyticsSize string, configServerManagementMode *string) string { + var rootConfig string + if configServerManagementMode != nil { + // valid values: FIXED_TO_DEDICATED or ATLAS_MANAGED (default) + // only valid for Major version 8 and later + // cluster must be SHARDED + rootConfig = fmt.Sprintf(` + mongo_db_major_version = "8" + config_server_management_mode = %[1]q + `, *configServerManagementMode) + } return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -1186,6 +1207,7 @@ func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards project_id = mongodbatlas_project.cluster_project.id name = %[3]q cluster_type = "SHARDED" + %[6]s replication_specs { num_shards = %[4]d @@ -1217,11 +1239,16 @@ func configShardedOldSchemaMultiCloud(orgID, projectName, name string, numShards data "mongodbatlas_advanced_cluster" "test" { project_id = mongodbatlas_advanced_cluster.test.project_id name = mongodbatlas_advanced_cluster.test.name + depends_on = [mongodbatlas_advanced_cluster.test] } - `, orgID, projectName, name, numShards, analyticsSize) + data "mongodbatlas_advanced_clusters" "test" { + project_id = mongodbatlas_advanced_cluster.test.project_id + depends_on = [mongodbatlas_advanced_cluster.test] + } + `, orgID, projectName, name, numShards, analyticsSize, rootConfig) } -func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool) resource.TestCheckFunc { +func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize string, verifyExternalID bool, configServerManagementMode *string) resource.TestCheckFunc { additionalChecks := []resource.TestCheckFunc{ resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), @@ -1236,6 +1263,17 @@ func checkShardedOldSchemaMultiCloud(name string, numShards int, analyticsSize s additionalChecks, 
resource.TestCheckResourceAttrSet(resourceName, "replication_specs.0.external_id")) } + if configServerManagementMode != nil { + additionalChecks = append( + additionalChecks, + resource.TestCheckResourceAttr(resourceName, "config_server_management_mode", *configServerManagementMode), + resource.TestCheckResourceAttrSet(resourceName, "config_server_type"), + resource.TestCheckResourceAttr(dataSourceName, "config_server_management_mode", *configServerManagementMode), + resource.TestCheckResourceAttrSet(dataSourceName, "config_server_type"), + resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.config_server_management_mode", *configServerManagementMode), + resource.TestCheckResourceAttrSet(dataSourcePluralName, "results.0.config_server_type"), + ) + } return checkAggr( []string{"project_id", "replication_specs.#", "replication_specs.0.id", "replication_specs.0.region_configs.#"}, @@ -2076,7 +2114,7 @@ func checkGeoShardedTransitionOldToNewSchema(useNewSchema bool) resource.TestChe ) } -func configReplicaSetScalingStrategy(orgID, projectName, name, replicaSetScalingStrategy string) string { +func configReplicaSetScalingStrategyAndRedactClientLogData(orgID, projectName, name, replicaSetScalingStrategy string, redactClientLogData bool) string { return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -2089,6 +2127,7 @@ func configReplicaSetScalingStrategy(orgID, projectName, name, replicaSetScaling backup_enabled = false cluster_type = "SHARDED" replica_set_scaling_strategy = %[4]q + redact_client_log_data = %[5]t replication_specs { region_configs { @@ -2119,10 +2158,10 @@ func configReplicaSetScalingStrategy(orgID, projectName, name, replicaSetScaling project_id = mongodbatlas_advanced_cluster.test.project_id use_replication_spec_per_shard = true } - `, orgID, projectName, name, replicaSetScalingStrategy) + `, orgID, projectName, name, replicaSetScalingStrategy, redactClientLogData) } -func configReplicaSetScalingStrategyOldSchema(orgID, projectName, name, replicaSetScalingStrategy string) string { +func configReplicaSetScalingStrategyAndRedactClientLogDataOldSchema(orgID, projectName, name, replicaSetScalingStrategy string, redactClientLogData bool) string { return fmt.Sprintf(` resource "mongodbatlas_project" "cluster_project" { org_id = %[1]q @@ -2135,6 +2174,7 @@ func configReplicaSetScalingStrategyOldSchema(orgID, projectName, name, replicaS backup_enabled = false cluster_type = "SHARDED" replica_set_scaling_strategy = %[4]q + redact_client_log_data = %[5]t replication_specs { num_shards = 2 @@ -2164,16 +2204,18 @@ func configReplicaSetScalingStrategyOldSchema(orgID, projectName, name, replicaS data "mongodbatlas_advanced_clusters" "test" { project_id = mongodbatlas_advanced_cluster.test.project_id } - `, orgID, projectName, name, replicaSetScalingStrategy) + `, orgID, projectName, name, replicaSetScalingStrategy, redactClientLogData) } -func checkReplicaSetScalingStrategy(replicaSetScalingStrategy string) resource.TestCheckFunc { +func checkReplicaSetScalingStrategyAndRedactClientLogData(replicaSetScalingStrategy string, redactClientLogData bool) resource.TestCheckFunc { clusterChecks := map[string]string{ - "replica_set_scaling_strategy": replicaSetScalingStrategy} + "replica_set_scaling_strategy": replicaSetScalingStrategy, + "redact_client_log_data": strconv.FormatBool(redactClientLogData), + } // plural data source checks additionalChecks := acc.AddAttrSetChecks(dataSourcePluralName, nil, - []string{"results.#", 
"results.0.replica_set_scaling_strategy"}...) + []string{"results.#", "results.0.replica_set_scaling_strategy", "results.0.redact_client_log_data"}...) return checkAggr( []string{}, clusterChecks, @@ -2181,7 +2223,7 @@ func checkReplicaSetScalingStrategy(replicaSetScalingStrategy string) resource.T ) } -func configPriority(projectID, name string, oldSchema, swapPriorities bool) string { +func configPriority(orgID, projectName, clusterName string, oldSchema, swapPriorities bool) string { const ( config7 = ` region_configs { @@ -2216,16 +2258,21 @@ func configPriority(projectID, name string, oldSchema, swapPriorities bool) stri } return fmt.Sprintf(` + resource "mongodbatlas_project" "test" { + org_id = %[1]q + name = %[2]q + } + resource "mongodbatlas_advanced_cluster" "test" { - project_id = %[1]q - name = %[2]q - cluster_type = %[3]q + project_id = mongodbatlas_project.test.id + name = %[3]q + cluster_type = %[4]q backup_enabled = false replication_specs { - %[4]s %[5]s + %[6]s } } - `, projectID, name, strType, strNumShards, strConfigs) + `, orgID, projectName, clusterName, strType, strNumShards, strConfigs) } diff --git a/internal/service/advancedclustertpf/README.md b/internal/service/advancedclustertpf/README.md new file mode 100644 index 0000000000..8275f6fd58 --- /dev/null +++ b/internal/service/advancedclustertpf/README.md @@ -0,0 +1,9 @@ +# advancedclustertpf package + +This package contains the WIP for `mongodbatlas_advanced_cluster` in TPF. Current exposed implementation is in `advancedcluster` package. + +**Note:** This file will be deleted once the update is complete and `advancedcluster` package will be deleted. + +## Changes in advancedcluster that needs to be added here +(list changes done in advancedcluster which couldn't also be done here at that moment) + diff --git a/internal/service/advancedclustertpf/data_source.go b/internal/service/advancedclustertpf/data_source.go new file mode 100644 index 0000000000..2fee67d592 --- /dev/null +++ b/internal/service/advancedclustertpf/data_source.go @@ -0,0 +1,32 @@ +package advancedclustertpf + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" +) + +const resourceName = "advanced_cluster" // TODO: if resource exists this can be deleted + +var _ datasource.DataSource = &ds{} +var _ datasource.DataSourceWithConfigure = &ds{} + +func DataSource() datasource.DataSource { + return &ds{ + DSCommon: config.DSCommon{ + DataSourceName: resourceName, + }, + } +} + +type ds struct { + config.DSCommon +} + +func (d *ds) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = DataSourceSchema(ctx) +} + +func (d *ds) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { +} diff --git a/internal/service/advancedclustertpf/data_source_schema.go b/internal/service/advancedclustertpf/data_source_schema.go new file mode 100644 index 0000000000..8bcf17f177 --- /dev/null +++ b/internal/service/advancedclustertpf/data_source_schema.go @@ -0,0 +1,731 @@ +package advancedclustertpf + +import ( + "context" + "regexp" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func DataSourceSchema(ctx context.Context) schema.Schema { + return 
schema.Schema{ + Attributes: map[string]schema.Attribute{ + "accept_data_risks_and_force_replica_set_reconfig": schema.StringAttribute{ + Computed: true, + Description: "If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set **acceptDataRisksAndForceReplicaSetReconfig** to the current date.", + MarkdownDescription: "If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set **acceptDataRisksAndForceReplicaSetReconfig** to the current date.", + }, + "backup_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the cluster can perform backups. If set to `true`, the cluster can perform backups. You must set this value to `true` for NVMe clusters. Backup uses [Cloud Backups](https://docs.atlas.mongodb.com/backup/cloud-backup/overview/) for dedicated clusters and [Shared Cluster Backups](https://docs.atlas.mongodb.com/backup/shared-tier/overview/) for tenant clusters. If set to `false`, the cluster doesn't use backups.", + MarkdownDescription: "Flag that indicates whether the cluster can perform backups. If set to `true`, the cluster can perform backups. You must set this value to `true` for NVMe clusters. Backup uses [Cloud Backups](https://docs.atlas.mongodb.com/backup/cloud-backup/overview/) for dedicated clusters and [Shared Cluster Backups](https://docs.atlas.mongodb.com/backup/shared-tier/overview/) for tenant clusters. If set to `false`, the cluster doesn't use backups.", + }, + "bi_connector": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether MongoDB Connector for Business Intelligence is enabled on the specified cluster.", + MarkdownDescription: "Flag that indicates whether MongoDB Connector for Business Intelligence is enabled on the specified cluster.", + }, + "read_preference": schema.StringAttribute{ + Computed: true, + Description: "Data source node designated for the MongoDB Connector for Business Intelligence on MongoDB Cloud. The MongoDB Connector for Business Intelligence on MongoDB Cloud reads data from the primary, secondary, or analytics node based on your read preferences. Defaults to `ANALYTICS` node, or `SECONDARY` if there are no `ANALYTICS` nodes.", + MarkdownDescription: "Data source node designated for the MongoDB Connector for Business Intelligence on MongoDB Cloud. The MongoDB Connector for Business Intelligence on MongoDB Cloud reads data from the primary, secondary, or analytics node based on your read preferences. 
Defaults to `ANALYTICS` node, or `SECONDARY` if there are no `ANALYTICS` nodes.", + }, + }, + CustomType: BiConnectorType{ + ObjectType: types.ObjectType{ + AttrTypes: BiConnectorValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Settings needed to configure the MongoDB Connector for Business Intelligence for this cluster.", + MarkdownDescription: "Settings needed to configure the MongoDB Connector for Business Intelligence for this cluster.", + }, + "cluster_name": schema.StringAttribute{ + Required: true, + Description: "Human-readable label that identifies this cluster.", + MarkdownDescription: "Human-readable label that identifies this cluster.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-zA-Z0-9][a-zA-Z0-9-]*)?[a-zA-Z0-9]+$"), ""), + }, + }, + "cluster_type": schema.StringAttribute{ + Computed: true, + Description: "Configuration of nodes that comprise the cluster.", + MarkdownDescription: "Configuration of nodes that comprise the cluster.", + }, + "config_server_management_mode": schema.StringAttribute{ + Computed: true, + Description: "Config Server Management Mode for creating or updating a sharded cluster.\n\nWhen configured as ATLAS_MANAGED, atlas may automatically switch the cluster's config server type for optimal performance and savings.\n\nWhen configured as FIXED_TO_DEDICATED, the cluster will always use a dedicated config server.", + MarkdownDescription: "Config Server Management Mode for creating or updating a sharded cluster.\n\nWhen configured as ATLAS_MANAGED, atlas may automatically switch the cluster's config server type for optimal performance and savings.\n\nWhen configured as FIXED_TO_DEDICATED, the cluster will always use a dedicated config server.", + }, + "config_server_type": schema.StringAttribute{ + Computed: true, + Description: "Describes a sharded cluster's config server type.", + MarkdownDescription: "Describes a sharded cluster's config server type.", + }, + "connection_strings": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "aws_private_link": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to MongoDB Cloud through the interface endpoint that the key names.", + MarkdownDescription: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to MongoDB Cloud through the interface endpoint that the key names.", + }, + "aws_private_link_srv": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to Atlas through the interface endpoint that the key names.", + MarkdownDescription: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. 
Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to Atlas through the interface endpoint that the key names.", + }, + "private": schema.StringAttribute{ + Computed: true, + Description: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter once someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn't, use connectionStrings.private. For Amazon Web Services (AWS) clusters, this resource returns this parameter only if you enable custom DNS.", + MarkdownDescription: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter once someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn't, use connectionStrings.private. 
For Amazon Web Services (AWS) clusters, this resource returns this parameter only if you enable custom DNS.", + }, + "private_endpoint": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "connection_string": schema.StringAttribute{ + Computed: true, + Description: "Private endpoint-aware connection string that uses the `mongodb://` protocol to connect to MongoDB Cloud through a private endpoint.", + MarkdownDescription: "Private endpoint-aware connection string that uses the `mongodb://` protocol to connect to MongoDB Cloud through a private endpoint.", + }, + "endpoints": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "endpoint_id": schema.StringAttribute{ + Computed: true, + Description: "Unique string that the cloud provider uses to identify the private endpoint.", + MarkdownDescription: "Unique string that the cloud provider uses to identify the private endpoint.", + }, + "provider_name": schema.StringAttribute{ + Computed: true, + Description: "Cloud provider in which MongoDB Cloud deploys the private endpoint.", + MarkdownDescription: "Cloud provider in which MongoDB Cloud deploys the private endpoint.", + }, + "region": schema.StringAttribute{ + Computed: true, + Description: "Region where the private endpoint is deployed.", + MarkdownDescription: "Region where the private endpoint is deployed.", + }, + }, + CustomType: EndpointsType{ + ObjectType: types.ObjectType{ + AttrTypes: EndpointsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List that contains the private endpoints through which you connect to MongoDB Cloud when you use **connectionStrings.privateEndpoint[n].connectionString** or **connectionStrings.privateEndpoint[n].srvConnectionString**.", + MarkdownDescription: "List that contains the private endpoints through which you connect to MongoDB Cloud when you use **connectionStrings.privateEndpoint[n].connectionString** or **connectionStrings.privateEndpoint[n].srvConnectionString**.", + }, + "srv_connection_string": schema.StringAttribute{ + Computed: true, + Description: "Private endpoint-aware connection string that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. The `mongodb+srv` protocol tells the driver to look up the seed list of hosts in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application supports it. If it doesn't, use connectionStrings.privateEndpoint[n].connectionString.", + MarkdownDescription: "Private endpoint-aware connection string that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. The `mongodb+srv` protocol tells the driver to look up the seed list of hosts in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application supports it. 
If it doesn't, use connectionStrings.privateEndpoint[n].connectionString.", + }, + "srv_shard_optimized_connection_string": schema.StringAttribute{ + Computed: true, + Description: "Private endpoint-aware connection string optimized for sharded clusters that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application and Atlas cluster supports it. If it doesn't, use and consult the documentation for connectionStrings.privateEndpoint[n].srvConnectionString.", + MarkdownDescription: "Private endpoint-aware connection string optimized for sharded clusters that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application and Atlas cluster supports it. If it doesn't, use and consult the documentation for connectionStrings.privateEndpoint[n].srvConnectionString.", + }, + "type": schema.StringAttribute{ + Computed: true, + Description: "MongoDB process type to which your application connects. Use `MONGOD` for replica sets and `MONGOS` for sharded clusters.", + MarkdownDescription: "MongoDB process type to which your application connects. Use `MONGOD` for replica sets and `MONGOS` for sharded clusters.", + }, + }, + CustomType: PrivateEndpointType{ + ObjectType: types.ObjectType{ + AttrTypes: PrivateEndpointValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of private endpoint-aware connection strings that you can use to connect to this cluster through a private endpoint. This parameter returns only if you deployed a private endpoint to all regions to which you deployed this clusters' nodes.", + MarkdownDescription: "List of private endpoint-aware connection strings that you can use to connect to this cluster through a private endpoint. This parameter returns only if you deployed a private endpoint to all regions to which you deployed this clusters' nodes.", + }, + "private_srv": schema.StringAttribute{ + Computed: true, + Description: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter when someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your driver supports it. If it doesn't, use `connectionStrings.private`. For Amazon Web Services (AWS) clusters, this parameter returns only if you [enable custom DNS](https://docs.atlas.mongodb.com/reference/api/aws-custom-dns-update/).", + MarkdownDescription: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. 
This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter when someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your driver supports it. If it doesn't, use `connectionStrings.private`. For Amazon Web Services (AWS) clusters, this parameter returns only if you [enable custom DNS](https://docs.atlas.mongodb.com/reference/api/aws-custom-dns-update/).", + }, + "standard": schema.StringAttribute{ + Computed: true, + Description: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb://` protocol.", + MarkdownDescription: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb://` protocol.", + }, + "standard_srv": schema.StringAttribute{ + Computed: true, + Description: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb+srv://` protocol.", + MarkdownDescription: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb+srv://` protocol.", + }, + }, + CustomType: ConnectionStringsType{ + ObjectType: types.ObjectType{ + AttrTypes: ConnectionStringsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Collection of Uniform Resource Locators that point to the MongoDB database.", + MarkdownDescription: "Collection of Uniform Resource Locators that point to the MongoDB database.", + }, + "create_date": schema.StringAttribute{ + Computed: true, + Description: "Date and time when MongoDB Cloud created this cluster. This parameter expresses its value in ISO 8601 format in UTC.", + MarkdownDescription: "Date and time when MongoDB Cloud created this cluster. This parameter expresses its value in ISO 8601 format in UTC.", + }, + "disk_warming_mode": schema.StringAttribute{ + Computed: true, + Description: "Disk warming mode selection.", + MarkdownDescription: "Disk warming mode selection.", + }, + "encryption_at_rest_provider": schema.StringAttribute{ + Computed: true, + Description: "Cloud service provider that manages your customer keys to provide an additional layer of encryption at rest for the cluster. To enable customer key management for encryption at rest, the cluster **replicationSpecs[n].regionConfigs[m].{type}Specs.instanceSize** setting must be `M10` or higher and `\"backupEnabled\" : false` or omitted entirely.", + MarkdownDescription: "Cloud service provider that manages your customer keys to provide an additional layer of encryption at rest for the cluster. 
To enable customer key management for encryption at rest, the cluster **replicationSpecs[n].regionConfigs[m].{type}Specs.instanceSize** setting must be `M10` or higher and `\"backupEnabled\" : false` or omitted entirely.", + }, + "feature_compatibility_version": schema.StringAttribute{ + Computed: true, + Description: "Feature compatibility version of the cluster.", + MarkdownDescription: "Feature compatibility version of the cluster.", + }, + "feature_compatibility_version_expiration_date": schema.StringAttribute{ + Computed: true, + Description: "Feature compatibility version expiration date.", + MarkdownDescription: "Feature compatibility version expiration date.", + }, + "global_cluster_self_managed_sharding": schema.BoolAttribute{ + Computed: true, + Description: "Set this field to configure the Sharding Management Mode when creating a new Global Cluster.\n\nWhen set to false, the management mode is set to Atlas-Managed Sharding. This mode fully manages the sharding of your Global Cluster and is built to provide a seamless deployment experience.\n\nWhen set to true, the management mode is set to Self-Managed Sharding. This mode leaves the management of shards in your hands and is built to provide an advanced and flexible deployment experience.\n\nThis setting cannot be changed once the cluster is deployed.", + MarkdownDescription: "Set this field to configure the Sharding Management Mode when creating a new Global Cluster.\n\nWhen set to false, the management mode is set to Atlas-Managed Sharding. This mode fully manages the sharding of your Global Cluster and is built to provide a seamless deployment experience.\n\nWhen set to true, the management mode is set to Self-Managed Sharding. This mode leaves the management of shards in your hands and is built to provide an advanced and flexible deployment experience.\n\nThis setting cannot be changed once the cluster is deployed.", + }, + "group_id": schema.StringAttribute{ + Required: true, + Description: "Unique 24-hexadecimal digit string that identifies your project. Use the [/groups](#tag/Projects/operation/listProjects) endpoint to retrieve all projects to which the authenticated user has access.\n\n**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies your project. Use the [/groups](#tag/Projects/operation/listProjects) endpoint to retrieve all projects to which the authenticated user has access.\n\n**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. 
The resource and corresponding endpoints use the term groups.", + Validators: []validator.String{ + stringvalidator.LengthBetween(24, 24), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-f0-9]{24})$"), ""), + }, + }, + "id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the cluster.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the cluster.", + }, + "labels": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Computed: true, + Description: "Key applied to tag and categorize this component.", + MarkdownDescription: "Key applied to tag and categorize this component.", + }, + "value": schema.StringAttribute{ + Computed: true, + Description: "Value set to the Key applied to tag and categorize this component.", + MarkdownDescription: "Value set to the Key applied to tag and categorize this component.", + }, + }, + CustomType: LabelsType{ + ObjectType: types.ObjectType{ + AttrTypes: LabelsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "Collection of key-value pairs between 1 to 255 characters in length that tag and categorize the cluster. The MongoDB Cloud console doesn't display your labels.\n\nCluster labels are deprecated and will be removed in a future release. We strongly recommend that you use [resource tags](https://dochub.mongodb.org/core/add-cluster-tag-atlas) instead.", + MarkdownDescription: "Collection of key-value pairs between 1 to 255 characters in length that tag and categorize the cluster. The MongoDB Cloud console doesn't display your labels.\n\nCluster labels are deprecated and will be removed in a future release. We strongly recommend that you use [resource tags](https://dochub.mongodb.org/core/add-cluster-tag-atlas) instead.", + DeprecationMessage: "This attribute is deprecated.", + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. 
RFC 5988 outlines these relationships.", + }, + "mongo_dbemployee_access_grant": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "expiration_time": schema.StringAttribute{ + Computed: true, + Description: "Expiration date for the employee access grant.", + MarkdownDescription: "Expiration date for the employee access grant.", + }, + "grant_type": schema.StringAttribute{ + Computed: true, + Description: "Level of access to grant to MongoDB Employees.", + MarkdownDescription: "Level of access to grant to MongoDB Employees.", + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + }, + }, + CustomType: MongoDbemployeeAccessGrantType{ + ObjectType: types.ObjectType{ + AttrTypes: MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "MongoDB employee granted access level and expiration for a cluster.", + MarkdownDescription: "MongoDB employee granted access level and expiration for a cluster.", + }, + "mongo_dbmajor_version": schema.StringAttribute{ + Computed: true, + Description: "MongoDB major version of the cluster.\n\nOn creation: Choose from the available versions of MongoDB, or leave unspecified for the current recommended default in the MongoDB Cloud platform. The recommended version is a recent Long Term Support version. The default is not guaranteed to be the most recently released version throughout the entire release cycle. For versions available in a specific project, see the linked documentation or use the API endpoint for [project LTS versions endpoint](#tag/Projects/operation/getProjectLTSVersions).\n\n On update: Increase version only by 1 major version at a time. 
If the cluster is pinned to a MongoDB feature compatibility version exactly one major version below the current MongoDB version, the MongoDB version can be downgraded to the previous major version.", + MarkdownDescription: "MongoDB major version of the cluster.\n\nOn creation: Choose from the available versions of MongoDB, or leave unspecified for the current recommended default in the MongoDB Cloud platform. The recommended version is a recent Long Term Support version. The default is not guaranteed to be the most recently released version throughout the entire release cycle. For versions available in a specific project, see the linked documentation or use the API endpoint for [project LTS versions endpoint](#tag/Projects/operation/getProjectLTSVersions).\n\n On update: Increase version only by 1 major version at a time. If the cluster is pinned to a MongoDB feature compatibility version exactly one major version below the current MongoDB version, the MongoDB version can be downgraded to the previous major version.", + }, + "mongo_dbversion": schema.StringAttribute{ + Computed: true, + Description: "Version of MongoDB that the cluster runs.", + MarkdownDescription: "Version of MongoDB that the cluster runs.", + }, + "name": schema.StringAttribute{ + Computed: true, + Description: "Human-readable label that identifies the cluster.", + MarkdownDescription: "Human-readable label that identifies the cluster.", + }, + "paused": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the cluster is paused.", + MarkdownDescription: "Flag that indicates whether the cluster is paused.", + }, + "pit_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the cluster uses continuous cloud backups.", + MarkdownDescription: "Flag that indicates whether the cluster uses continuous cloud backups.", + }, + "redact_client_log_data": schema.BoolAttribute{ + Computed: true, + Description: "Enable or disable log redaction.\n\nThis setting configures the ``mongod`` or ``mongos`` to redact any document field contents from a message accompanying a given log event before logging. This prevents the program from writing potentially sensitive data stored on the database to the diagnostic log. Metadata such as error or operation codes, line numbers, and source file names are still visible in the logs.\n\nUse ``redactClientLogData`` in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements.\n\n*Note*: changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated.", + MarkdownDescription: "Enable or disable log redaction.\n\nThis setting configures the ``mongod`` or ``mongos`` to redact any document field contents from a message accompanying a given log event before logging. This prevents the program from writing potentially sensitive data stored on the database to the diagnostic log. 
Metadata such as error or operation codes, line numbers, and source file names are still visible in the logs.\n\nUse ``redactClientLogData`` in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements.\n\n*Note*: changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated.", + }, + "replica_set_scaling_strategy": schema.StringAttribute{ + Computed: true, + Description: "Set this field to configure the replica set scaling mode for your cluster.\n\nBy default, Atlas scales under WORKLOAD_TYPE. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes.\n\nWhen configured as SEQUENTIAL, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads.\n\nWhen configured as NODE_TYPE, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads.", + MarkdownDescription: "Set this field to configure the replica set scaling mode for your cluster.\n\nBy default, Atlas scales under WORKLOAD_TYPE. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes.\n\nWhen configured as SEQUENTIAL, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads.\n\nWhen configured as NODE_TYPE, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads.", + }, + "replication_specs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. If you include existing shard replication configurations in the request, you must specify this parameter. If you add a new shard to an existing Cluster, you may specify this parameter. The request deletes any existing shards in the Cluster that you exclude from the request. This corresponds to Shard ID displayed in the UI.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. If you include existing shard replication configurations in the request, you must specify this parameter. If you add a new shard to an existing Cluster, you may specify this parameter. The request deletes any existing shards in the Cluster that you exclude from the request. 
This corresponds to Shard ID displayed in the UI.", + }, + "region_configs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "analytics_auto_scaling": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + MarkdownDescription: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + }, + "max_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "min_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "scale_down_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + MarkdownDescription: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + }, + }, + CustomType: ComputeType{ + ObjectType: types.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles CPU scaling.", + MarkdownDescription: "Options that determine how this cluster handles CPU scaling.", + }, + "disk_gb": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether this cluster enables disk auto-scaling. 
The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + MarkdownDescription: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + }, + }, + CustomType: DiskGbType{ + ObjectType: types.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Setting that enables disk auto-scaling.", + MarkdownDescription: "Setting that enables disk auto-scaling.", + }, + }, + CustomType: AnalyticsAutoScalingType{ + ObjectType: types.ObjectType{ + AttrTypes: AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles resource scaling.", + MarkdownDescription: "Options that determine how this cluster handles resource scaling.", + }, + "analytics_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. 
\n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + }, + "instance_size": schema.StringAttribute{ + Computed: true, + Description: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + MarkdownDescription: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + }, + "node_count": schema.Int64Attribute{ + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: AnalyticsSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: AnalyticsSpecsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + MarkdownDescription: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + }, + "auto_scaling": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. 
If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + MarkdownDescription: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + }, + "max_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "min_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "scale_down_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + MarkdownDescription: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + }, + }, + CustomType: ComputeType{ + ObjectType: types.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles CPU scaling.", + MarkdownDescription: "Options that determine how this cluster handles CPU scaling.", + }, + "disk_gb": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + MarkdownDescription: "Flag that indicates whether this cluster enables disk auto-scaling. 
The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + }, + }, + CustomType: DiskGbType{ + ObjectType: types.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Setting that enables disk auto-scaling.", + MarkdownDescription: "Setting that enables disk auto-scaling.", + }, + }, + CustomType: AutoScalingType{ + ObjectType: types.ObjectType{ + AttrTypes: AutoScalingValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles resource scaling.", + MarkdownDescription: "Options that determine how this cluster handles resource scaling.", + }, + "backing_provider_name": schema.StringAttribute{ + Computed: true, + Description: "Cloud service provider on which MongoDB Cloud provisioned the multi-tenant cluster. The resource returns this parameter when **providerName** is `TENANT` and **electableSpecs.instanceSize** is `M0`, `M2` or `M5`.", + MarkdownDescription: "Cloud service provider on which MongoDB Cloud provisioned the multi-tenant cluster. The resource returns this parameter when **providerName** is `TENANT` and **electableSpecs.instanceSize** is `M0`, `M2` or `M5`.", + }, + "electable_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. 
Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + }, + "instance_size": schema.StringAttribute{ + Computed: true, + Description: "Hardware specification for the instances in this M0/M2/M5 tier cluster.", + MarkdownDescription: "Hardware specification for the instances in this M0/M2/M5 tier cluster.", + }, + "node_count": schema.Int64Attribute{ + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: ElectableSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ElectableSpecsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Hardware specifications for all electable nodes deployed in the region. Electable nodes can become the primary and can enable local reads. If you don't specify this option, MongoDB Cloud deploys no electable nodes to the region.", + MarkdownDescription: "Hardware specifications for all electable nodes deployed in the region. Electable nodes can become the primary and can enable local reads. If you don't specify this option, MongoDB Cloud deploys no electable nodes to the region.", + }, + "priority": schema.Int64Attribute{ + Computed: true, + Description: "Precedence is given to this region when a primary election occurs. If your **regionConfigs** has only **readOnlySpecs**, **analyticsSpecs**, or both, set this value to `0`. If you have multiple **regionConfigs** objects (your cluster is multi-region or multi-cloud), they must have priorities in descending order. The highest priority is `7`.\n\n**Example:** If you have three regions, their priorities would be `7`, `6`, and `5` respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be `4` and `3` respectively.", + MarkdownDescription: "Precedence is given to this region when a primary election occurs. If your **regionConfigs** has only **readOnlySpecs**, **analyticsSpecs**, or both, set this value to `0`. 
If you have multiple **regionConfigs** objects (your cluster is multi-region or multi-cloud), they must have priorities in descending order. The highest priority is `7`.\n\n**Example:** If you have three regions, their priorities would be `7`, `6`, and `5` respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be `4` and `3` respectively.", + }, + "provider_name": schema.StringAttribute{ + Computed: true, + Description: "Cloud service provider on which MongoDB Cloud provisions the hosts. Set dedicated clusters to `AWS`, `GCP`, `AZURE` or `TENANT`.", + MarkdownDescription: "Cloud service provider on which MongoDB Cloud provisions the hosts. Set dedicated clusters to `AWS`, `GCP`, `AZURE` or `TENANT`.", + }, + "read_only_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. 
If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + }, + "instance_size": schema.StringAttribute{ + Computed: true, + Description: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + MarkdownDescription: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + }, + "node_count": schema.Int64Attribute{ + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: ReadOnlySpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ReadOnlySpecsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + MarkdownDescription: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + }, + "region_name": schema.StringAttribute{ + Computed: true, + Description: "Physical location of your MongoDB cluster nodes. The region you choose can affect network latency for clients accessing your databases. The region name is only returned in the response for single-region clusters. When MongoDB Cloud deploys a dedicated cluster, it checks if a VPC or VPC connection exists for that provider and region. If not, MongoDB Cloud creates them as part of the deployment. It assigns the VPC a Classless Inter-Domain Routing (CIDR) block. To limit a new VPC peering connection to one Classless Inter-Domain Routing (CIDR) block and region, create the connection first. Deploy the cluster after the connection starts. 
GCP Clusters and Multi-region clusters require one VPC peering connection for each region. MongoDB nodes can use only the peering connection that resides in the same region as the nodes to communicate with the peered VPC.", + MarkdownDescription: "Physical location of your MongoDB cluster nodes. The region you choose can affect network latency for clients accessing your databases. The region name is only returned in the response for single-region clusters. When MongoDB Cloud deploys a dedicated cluster, it checks if a VPC or VPC connection exists for that provider and region. If not, MongoDB Cloud creates them as part of the deployment. It assigns the VPC a Classless Inter-Domain Routing (CIDR) block. To limit a new VPC peering connection to one Classless Inter-Domain Routing (CIDR) block and region, create the connection first. Deploy the cluster after the connection starts. GCP Clusters and Multi-region clusters require one VPC peering connection for each region. MongoDB nodes can use only the peering connection that resides in the same region as the nodes to communicate with the peered VPC.", + }, + }, + CustomType: RegionConfigsType{ + ObjectType: types.ObjectType{ + AttrTypes: RegionConfigsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "Hardware specifications for nodes set for a given region. Each **regionConfigs** object describes the region's priority in elections and the number and type of MongoDB nodes that MongoDB Cloud deploys to the region. Each **regionConfigs** object must have either an **analyticsSpecs** object, **electableSpecs** object, or **readOnlySpecs** object. Tenant clusters only require **electableSpecs. Dedicated** clusters can specify any of these specifications, but must have at least one **electableSpecs** object within a **replicationSpec**.\n\n**Example:**\n\nIf you set `\"replicationSpecs[n].regionConfigs[m].analyticsSpecs.instanceSize\" : \"M30\"`, set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : `\"M30\"` if you have electable nodes and `\"replicationSpecs[n].regionConfigs[m].readOnlySpecs.instanceSize\" : `\"M30\"` if you have read-only nodes.", + MarkdownDescription: "Hardware specifications for nodes set for a given region. Each **regionConfigs** object describes the region's priority in elections and the number and type of MongoDB nodes that MongoDB Cloud deploys to the region. Each **regionConfigs** object must have either an **analyticsSpecs** object, **electableSpecs** object, or **readOnlySpecs** object. Tenant clusters only require **electableSpecs. Dedicated** clusters can specify any of these specifications, but must have at least one **electableSpecs** object within a **replicationSpec**.\n\n**Example:**\n\nIf you set `\"replicationSpecs[n].regionConfigs[m].analyticsSpecs.instanceSize\" : \"M30\"`, set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : `\"M30\"` if you have electable nodes and `\"replicationSpecs[n].regionConfigs[m].readOnlySpecs.instanceSize\" : `\"M30\"` if you have read-only nodes.", + }, + "zone_id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. This value can be used to configure Global Cluster backup policies.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. 
This value can be used to configure Global Cluster backup policies.", + }, + "zone_name": schema.StringAttribute{ + Computed: true, + Description: "Human-readable label that describes the zone this shard belongs to in a Global Cluster. Provide this value only if \"clusterType\" : \"GEOSHARDED\" but not \"selfManagedSharding\" : true.", + MarkdownDescription: "Human-readable label that describes the zone this shard belongs to in a Global Cluster. Provide this value only if \"clusterType\" : \"GEOSHARDED\" but not \"selfManagedSharding\" : true.", + }, + }, + CustomType: ReplicationSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ReplicationSpecsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of settings that configure your cluster regions. This array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations.", + MarkdownDescription: "List of settings that configure your cluster regions. This array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations.", + }, + "root_cert_type": schema.StringAttribute{ + Computed: true, + Description: "Root Certificate Authority that MongoDB Cloud cluster uses. MongoDB Cloud supports Internet Security Research Group.", + MarkdownDescription: "Root Certificate Authority that MongoDB Cloud cluster uses. MongoDB Cloud supports Internet Security Research Group.", + }, + "state_name": schema.StringAttribute{ + Computed: true, + Description: "Human-readable label that indicates the current operating condition of this cluster.", + MarkdownDescription: "Human-readable label that indicates the current operating condition of this cluster.", + }, + "tags": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Computed: true, + Description: "Constant that defines the set of the tag. For example, `environment` in the `environment : production` tag.", + MarkdownDescription: "Constant that defines the set of the tag. For example, `environment` in the `environment : production` tag.", + }, + "value": schema.StringAttribute{ + Computed: true, + Description: "Variable that belongs to the set of the tag. For example, `production` in the `environment : production` tag.", + MarkdownDescription: "Variable that belongs to the set of the tag. For example, `production` in the `environment : production` tag.", + }, + }, + CustomType: TagsType{ + ObjectType: types.ObjectType{ + AttrTypes: TagsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster.", + MarkdownDescription: "List that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster.", + }, + "termination_protection_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether termination protection is enabled on the cluster. If set to `true`, MongoDB Cloud won't delete the cluster. If set to `false`, MongoDB Cloud will delete the cluster.", + MarkdownDescription: "Flag that indicates whether termination protection is enabled on the cluster. If set to `true`, MongoDB Cloud won't delete the cluster. 
If set to `false`, MongoDB Cloud will delete the cluster.", + }, + "version_release_system": schema.StringAttribute{ + Computed: true, + Description: "Method by which the cluster maintains the MongoDB versions. If value is `CONTINUOUS`, you must not specify **mongoDBMajorVersion**.", + MarkdownDescription: "Method by which the cluster maintains the MongoDB versions. If value is `CONTINUOUS`, you must not specify **mongoDBMajorVersion**.", + }, + }, + } +} + +type ModelDS struct { + Labels types.List `tfsdk:"labels"` + Tags types.List `tfsdk:"tags"` + ReplicationSpecs types.List `tfsdk:"replication_specs"` + Links types.List `tfsdk:"links"` + CreateDate types.String `tfsdk:"create_date"` + ClusterName types.String `tfsdk:"cluster_name"` + ConfigServerType types.String `tfsdk:"config_server_type"` + VersionReleaseSystem types.String `tfsdk:"version_release_system"` + AcceptDataRisksAndForceReplicaSetReconfig types.String `tfsdk:"accept_data_risks_and_force_replica_set_reconfig"` + DiskWarmingMode types.String `tfsdk:"disk_warming_mode"` + EncryptionAtRestProvider types.String `tfsdk:"encryption_at_rest_provider"` + FeatureCompatibilityVersion types.String `tfsdk:"feature_compatibility_version"` + FeatureCompatibilityVersionExpirationDate types.String `tfsdk:"feature_compatibility_version_expiration_date"` + StateName types.String `tfsdk:"state_name"` + GroupId types.String `tfsdk:"group_id"` + Id types.String `tfsdk:"id"` + ClusterType types.String `tfsdk:"cluster_type"` + ConfigServerManagementMode types.String `tfsdk:"config_server_management_mode"` + RootCertType types.String `tfsdk:"root_cert_type"` + MongoDbmajorVersion types.String `tfsdk:"mongo_dbmajor_version"` + MongoDbversion types.String `tfsdk:"mongo_dbversion"` + Name types.String `tfsdk:"name"` + ReplicaSetScalingStrategy types.String `tfsdk:"replica_set_scaling_strategy"` + ConnectionStrings ConnectionStringsValue `tfsdk:"connection_strings"` + MongoDbemployeeAccessGrant MongoDbemployeeAccessGrantValue `tfsdk:"mongo_dbemployee_access_grant"` + BiConnector BiConnectorValue `tfsdk:"bi_connector"` + PitEnabled types.Bool `tfsdk:"pit_enabled"` + RedactClientLogData types.Bool `tfsdk:"redact_client_log_data"` + Paused types.Bool `tfsdk:"paused"` + GlobalClusterSelfManagedSharding types.Bool `tfsdk:"global_cluster_self_managed_sharding"` + BackupEnabled types.Bool `tfsdk:"backup_enabled"` + TerminationProtectionEnabled types.Bool `tfsdk:"termination_protection_enabled"` +} diff --git a/internal/service/advancedclustertpf/main_test.go b/internal/service/advancedclustertpf/main_test.go new file mode 100644 index 0000000000..4f1d4d3054 --- /dev/null +++ b/internal/service/advancedclustertpf/main_test.go @@ -0,0 +1,15 @@ +package advancedclustertpf_test + +import ( + "os" + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" +) + +func TestMain(m *testing.M) { + cleanup := acc.SetupSharedResources() + exitCode := m.Run() + cleanup() + os.Exit(exitCode) +} diff --git a/internal/service/advancedclustertpf/model.go b/internal/service/advancedclustertpf/model.go new file mode 100644 index 0000000000..61baed137c --- /dev/null +++ b/internal/service/advancedclustertpf/model.go @@ -0,0 +1,16 @@ +package advancedclustertpf + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "go.mongodb.org/atlas-sdk/v20240805004/admin" +) + +func NewTFModel(ctx context.Context, apiResp *admin.ClusterDescription20240805) (*TFModel, diag.Diagnostics) { + return &TFModel{}, nil +} + +func 
NewAtlasReq(ctx context.Context, plan *TFModel) (*admin.ClusterDescription20240805, diag.Diagnostics) { + return &admin.ClusterDescription20240805{}, nil +} diff --git a/internal/service/advancedclustertpf/plural_data_source.go b/internal/service/advancedclustertpf/plural_data_source.go new file mode 100644 index 0000000000..cd68ed23bc --- /dev/null +++ b/internal/service/advancedclustertpf/plural_data_source.go @@ -0,0 +1,31 @@ +package advancedclustertpf + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" +) + +var _ datasource.DataSource = &pluralDS{} +var _ datasource.DataSourceWithConfigure = &pluralDS{} + +func PluralDataSource() datasource.DataSource { + return &pluralDS{ + DSCommon: config.DSCommon{ + DataSourceName: fmt.Sprintf("%ss", resourceName), + }, + } +} + +type pluralDS struct { + config.DSCommon +} + +func (d *pluralDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = PluralDataSourceSchema(ctx) +} + +func (d *pluralDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { +} diff --git a/internal/service/advancedclustertpf/plural_data_source_schema.go b/internal/service/advancedclustertpf/plural_data_source_schema.go new file mode 100644 index 0000000000..da3acde60e --- /dev/null +++ b/internal/service/advancedclustertpf/plural_data_source_schema.go @@ -0,0 +1,2980 @@ +// Code generated by terraform-plugin-framework-generator DO NOT EDIT. + +package advancedclustertpf + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func PluralDataSourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "group_id": schema.StringAttribute{ + Required: true, + Description: "Unique 24-hexadecimal digit string that identifies your project. Use the [/groups](#tag/Projects/operation/listProjects) endpoint to retrieve all projects to which the authenticated user has access.\n\n**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies your project. Use the [/groups](#tag/Projects/operation/listProjects) endpoint to retrieve all projects to which the authenticated user has access.\n\n**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. 
The resource and corresponding endpoints use the term groups.", + Validators: []validator.String{ + stringvalidator.LengthBetween(24, 24), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-f0-9]{24})$"), ""), + }, + }, + "include_count": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether the response returns the total number of items (**totalCount**) in the response.", + MarkdownDescription: "Flag that indicates whether the response returns the total number of items (**totalCount**) in the response.", + }, + "include_deleted_with_retained_backups": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether to return Clusters with retain backups.", + MarkdownDescription: "Flag that indicates whether to return Clusters with retain backups.", + }, + "items_per_page": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of items that the response returns per page.", + MarkdownDescription: "Number of items that the response returns per page.", + Validators: []validator.Int64{ + int64validator.Between(1, 500), + }, + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + }, + "page_num": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of the page that displays the current set of the total objects that the response returns.", + MarkdownDescription: "Number of the page that displays the current set of the total objects that the response returns.", + Validators: []validator.Int64{ + int64validator.AtLeast(1), + }, + }, + "results": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "accept_data_risks_and_force_replica_set_reconfig": schema.StringAttribute{ + Computed: true, + Description: "If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. 
Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set **acceptDataRisksAndForceReplicaSetReconfig** to the current date.", + MarkdownDescription: "If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set **acceptDataRisksAndForceReplicaSetReconfig** to the current date.", + }, + "backup_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the cluster can perform backups. If set to `true`, the cluster can perform backups. You must set this value to `true` for NVMe clusters. Backup uses [Cloud Backups](https://docs.atlas.mongodb.com/backup/cloud-backup/overview/) for dedicated clusters and [Shared Cluster Backups](https://docs.atlas.mongodb.com/backup/shared-tier/overview/) for tenant clusters. If set to `false`, the cluster doesn't use backups.", + MarkdownDescription: "Flag that indicates whether the cluster can perform backups. If set to `true`, the cluster can perform backups. You must set this value to `true` for NVMe clusters. Backup uses [Cloud Backups](https://docs.atlas.mongodb.com/backup/cloud-backup/overview/) for dedicated clusters and [Shared Cluster Backups](https://docs.atlas.mongodb.com/backup/shared-tier/overview/) for tenant clusters. If set to `false`, the cluster doesn't use backups.", + }, + "bi_connector": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether MongoDB Connector for Business Intelligence is enabled on the specified cluster.", + MarkdownDescription: "Flag that indicates whether MongoDB Connector for Business Intelligence is enabled on the specified cluster.", + }, + "read_preference": schema.StringAttribute{ + Computed: true, + Description: "Data source node designated for the MongoDB Connector for Business Intelligence on MongoDB Cloud. The MongoDB Connector for Business Intelligence on MongoDB Cloud reads data from the primary, secondary, or analytics node based on your read preferences. Defaults to `ANALYTICS` node, or `SECONDARY` if there are no `ANALYTICS` nodes.", + MarkdownDescription: "Data source node designated for the MongoDB Connector for Business Intelligence on MongoDB Cloud. The MongoDB Connector for Business Intelligence on MongoDB Cloud reads data from the primary, secondary, or analytics node based on your read preferences. 
Defaults to `ANALYTICS` node, or `SECONDARY` if there are no `ANALYTICS` nodes.", + }, + }, + CustomType: BiConnectorType{ + ObjectType: types.ObjectType{ + AttrTypes: BiConnectorValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Settings needed to configure the MongoDB Connector for Business Intelligence for this cluster.", + MarkdownDescription: "Settings needed to configure the MongoDB Connector for Business Intelligence for this cluster.", + }, + "cluster_type": schema.StringAttribute{ + Computed: true, + Description: "Configuration of nodes that comprise the cluster.", + MarkdownDescription: "Configuration of nodes that comprise the cluster.", + }, + "config_server_management_mode": schema.StringAttribute{ + Computed: true, + Description: "Config Server Management Mode for creating or updating a sharded cluster.\n\nWhen configured as ATLAS_MANAGED, atlas may automatically switch the cluster's config server type for optimal performance and savings.\n\nWhen configured as FIXED_TO_DEDICATED, the cluster will always use a dedicated config server.", + MarkdownDescription: "Config Server Management Mode for creating or updating a sharded cluster.\n\nWhen configured as ATLAS_MANAGED, atlas may automatically switch the cluster's config server type for optimal performance and savings.\n\nWhen configured as FIXED_TO_DEDICATED, the cluster will always use a dedicated config server.", + }, + "config_server_type": schema.StringAttribute{ + Computed: true, + Description: "Describes a sharded cluster's config server type.", + MarkdownDescription: "Describes a sharded cluster's config server type.", + }, + "connection_strings": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "aws_private_link": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to MongoDB Cloud through the interface endpoint that the key names.", + MarkdownDescription: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to MongoDB Cloud through the interface endpoint that the key names.", + }, + "aws_private_link_srv": schema.MapAttribute{ + ElementType: types.StringType, + Computed: true, + Description: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to Atlas through the interface endpoint that the key names.", + MarkdownDescription: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. 
Each value identifies the related `mongodb://` connection string that you use to connect to Atlas through the interface endpoint that the key names.", + }, + "private": schema.StringAttribute{ + Computed: true, + Description: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter once someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn't, use connectionStrings.private. For Amazon Web Services (AWS) clusters, this resource returns this parameter only if you enable custom DNS.", + MarkdownDescription: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter once someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn't, use connectionStrings.private. For Amazon Web Services (AWS) clusters, this resource returns this parameter only if you enable custom DNS.", + }, + "private_endpoint": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "connection_string": schema.StringAttribute{ + Computed: true, + Description: "Private endpoint-aware connection string that uses the `mongodb://` protocol to connect to MongoDB Cloud through a private endpoint.", + MarkdownDescription: "Private endpoint-aware connection string that uses the `mongodb://` protocol to connect to MongoDB Cloud through a private endpoint.", + }, + "endpoints": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "endpoint_id": schema.StringAttribute{ + Computed: true, + Description: "Unique string that the cloud provider uses to identify the private endpoint.", + MarkdownDescription: "Unique string that the cloud provider uses to identify the private endpoint.", + }, + "provider_name": schema.StringAttribute{ + Computed: true, + Description: "Cloud provider in which MongoDB Cloud deploys the private endpoint.", + MarkdownDescription: "Cloud provider in which MongoDB Cloud deploys the private endpoint.", + }, + "region": schema.StringAttribute{ + Computed: true, + Description: "Region where the private endpoint is deployed.", + MarkdownDescription: "Region where the private endpoint is deployed.", + }, + }, + CustomType: EndpointsType{ + ObjectType: types.ObjectType{ + AttrTypes: EndpointsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List that contains the private endpoints through which you connect to MongoDB Cloud when you use **connectionStrings.privateEndpoint[n].connectionString** 
or **connectionStrings.privateEndpoint[n].srvConnectionString**.", + MarkdownDescription: "List that contains the private endpoints through which you connect to MongoDB Cloud when you use **connectionStrings.privateEndpoint[n].connectionString** or **connectionStrings.privateEndpoint[n].srvConnectionString**.", + }, + "srv_connection_string": schema.StringAttribute{ + Computed: true, + Description: "Private endpoint-aware connection string that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. The `mongodb+srv` protocol tells the driver to look up the seed list of hosts in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application supports it. If it doesn't, use connectionStrings.privateEndpoint[n].connectionString.", + MarkdownDescription: "Private endpoint-aware connection string that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. The `mongodb+srv` protocol tells the driver to look up the seed list of hosts in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application supports it. If it doesn't, use connectionStrings.privateEndpoint[n].connectionString.", + }, + "srv_shard_optimized_connection_string": schema.StringAttribute{ + Computed: true, + Description: "Private endpoint-aware connection string optimized for sharded clusters that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application and Atlas cluster supports it. If it doesn't, use and consult the documentation for connectionStrings.privateEndpoint[n].srvConnectionString.", + MarkdownDescription: "Private endpoint-aware connection string optimized for sharded clusters that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application and Atlas cluster supports it. If it doesn't, use and consult the documentation for connectionStrings.privateEndpoint[n].srvConnectionString.", + }, + "type": schema.StringAttribute{ + Computed: true, + Description: "MongoDB process type to which your application connects. Use `MONGOD` for replica sets and `MONGOS` for sharded clusters.", + MarkdownDescription: "MongoDB process type to which your application connects. 
Use `MONGOD` for replica sets and `MONGOS` for sharded clusters.", + }, + }, + CustomType: PrivateEndpointType{ + ObjectType: types.ObjectType{ + AttrTypes: PrivateEndpointValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of private endpoint-aware connection strings that you can use to connect to this cluster through a private endpoint. This parameter returns only if you deployed a private endpoint to all regions to which you deployed this clusters' nodes.", + MarkdownDescription: "List of private endpoint-aware connection strings that you can use to connect to this cluster through a private endpoint. This parameter returns only if you deployed a private endpoint to all regions to which you deployed this clusters' nodes.", + }, + "private_srv": schema.StringAttribute{ + Computed: true, + Description: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter when someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your driver supports it. If it doesn't, use `connectionStrings.private`. For Amazon Web Services (AWS) clusters, this parameter returns only if you [enable custom DNS](https://docs.atlas.mongodb.com/reference/api/aws-custom-dns-update/).", + MarkdownDescription: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter when someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your driver supports it. If it doesn't, use `connectionStrings.private`. For Amazon Web Services (AWS) clusters, this parameter returns only if you [enable custom DNS](https://docs.atlas.mongodb.com/reference/api/aws-custom-dns-update/).", + }, + "standard": schema.StringAttribute{ + Computed: true, + Description: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb://` protocol.", + MarkdownDescription: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb://` protocol.", + }, + "standard_srv": schema.StringAttribute{ + Computed: true, + Description: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb+srv://` protocol.", + MarkdownDescription: "Public connection string that you can use to connect to this cluster. 
This connection string uses the `mongodb+srv://` protocol.", + }, + }, + CustomType: ConnectionStringsType{ + ObjectType: types.ObjectType{ + AttrTypes: ConnectionStringsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Collection of Uniform Resource Locators that point to the MongoDB database.", + MarkdownDescription: "Collection of Uniform Resource Locators that point to the MongoDB database.", + }, + "create_date": schema.StringAttribute{ + Computed: true, + Description: "Date and time when MongoDB Cloud created this cluster. This parameter expresses its value in ISO 8601 format in UTC.", + MarkdownDescription: "Date and time when MongoDB Cloud created this cluster. This parameter expresses its value in ISO 8601 format in UTC.", + }, + "disk_warming_mode": schema.StringAttribute{ + Computed: true, + Description: "Disk warming mode selection.", + MarkdownDescription: "Disk warming mode selection.", + }, + "encryption_at_rest_provider": schema.StringAttribute{ + Computed: true, + Description: "Cloud service provider that manages your customer keys to provide an additional layer of encryption at rest for the cluster. To enable customer key management for encryption at rest, the cluster **replicationSpecs[n].regionConfigs[m].{type}Specs.instanceSize** setting must be `M10` or higher and `\"backupEnabled\" : false` or omitted entirely.", + MarkdownDescription: "Cloud service provider that manages your customer keys to provide an additional layer of encryption at rest for the cluster. To enable customer key management for encryption at rest, the cluster **replicationSpecs[n].regionConfigs[m].{type}Specs.instanceSize** setting must be `M10` or higher and `\"backupEnabled\" : false` or omitted entirely.", + }, + "feature_compatibility_version": schema.StringAttribute{ + Computed: true, + Description: "Feature compatibility version of the cluster.", + MarkdownDescription: "Feature compatibility version of the cluster.", + }, + "feature_compatibility_version_expiration_date": schema.StringAttribute{ + Computed: true, + Description: "Feature compatibility version expiration date.", + MarkdownDescription: "Feature compatibility version expiration date.", + }, + "global_cluster_self_managed_sharding": schema.BoolAttribute{ + Computed: true, + Description: "Set this field to configure the Sharding Management Mode when creating a new Global Cluster.\n\nWhen set to false, the management mode is set to Atlas-Managed Sharding. This mode fully manages the sharding of your Global Cluster and is built to provide a seamless deployment experience.\n\nWhen set to true, the management mode is set to Self-Managed Sharding. This mode leaves the management of shards in your hands and is built to provide an advanced and flexible deployment experience.\n\nThis setting cannot be changed once the cluster is deployed.", + MarkdownDescription: "Set this field to configure the Sharding Management Mode when creating a new Global Cluster.\n\nWhen set to false, the management mode is set to Atlas-Managed Sharding. This mode fully manages the sharding of your Global Cluster and is built to provide a seamless deployment experience.\n\nWhen set to true, the management mode is set to Self-Managed Sharding. 
This mode leaves the management of shards in your hands and is built to provide an advanced and flexible deployment experience.\n\nThis setting cannot be changed once the cluster is deployed.", + }, + "group_id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal character string that identifies the project.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies the project.", + }, + "id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the cluster.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the cluster.", + }, + "labels": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Computed: true, + Description: "Key applied to tag and categorize this component.", + MarkdownDescription: "Key applied to tag and categorize this component.", + }, + "value": schema.StringAttribute{ + Computed: true, + Description: "Value set to the Key applied to tag and categorize this component.", + MarkdownDescription: "Value set to the Key applied to tag and categorize this component.", + }, + }, + CustomType: LabelsType{ + ObjectType: types.ObjectType{ + AttrTypes: LabelsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "Collection of key-value pairs between 1 to 255 characters in length that tag and categorize the cluster. The MongoDB Cloud console doesn't display your labels.\n\nCluster labels are deprecated and will be removed in a future release. We strongly recommend that you use [resource tags](https://dochub.mongodb.org/core/add-cluster-tag-atlas) instead.", + MarkdownDescription: "Collection of key-value pairs between 1 to 255 characters in length that tag and categorize the cluster. The MongoDB Cloud console doesn't display your labels.\n\nCluster labels are deprecated and will be removed in a future release. We strongly recommend that you use [resource tags](https://dochub.mongodb.org/core/add-cluster-tag-atlas) instead.", + DeprecationMessage: "This attribute is deprecated.", + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. 
RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + }, + "mongo_dbemployee_access_grant": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "expiration_time": schema.StringAttribute{ + Computed: true, + Description: "Expiration date for the employee access grant.", + MarkdownDescription: "Expiration date for the employee access grant.", + }, + "grant_type": schema.StringAttribute{ + Computed: true, + Description: "Level of access to grant to MongoDB Employees.", + MarkdownDescription: "Level of access to grant to MongoDB Employees.", + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + }, + }, + CustomType: MongoDbemployeeAccessGrantType{ + ObjectType: types.ObjectType{ + AttrTypes: MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "MongoDB employee granted access level and expiration for a cluster.", + MarkdownDescription: "MongoDB employee granted access level and expiration for a cluster.", + }, + "mongo_dbmajor_version": schema.StringAttribute{ + Computed: true, + Description: "MongoDB major version of the cluster.\n\nOn creation: Choose from the available versions of MongoDB, or leave unspecified for the current recommended default in the MongoDB Cloud platform. The recommended version is a recent Long Term Support version. The default is not guaranteed to be the most recently released version throughout the entire release cycle. For versions available in a specific project, see the linked documentation or use the API endpoint for [project LTS versions endpoint](#tag/Projects/operation/getProjectLTSVersions).\n\n On update: Increase version only by 1 major version at a time. 
If the cluster is pinned to a MongoDB feature compatibility version exactly one major version below the current MongoDB version, the MongoDB version can be downgraded to the previous major version.", + MarkdownDescription: "MongoDB major version of the cluster.\n\nOn creation: Choose from the available versions of MongoDB, or leave unspecified for the current recommended default in the MongoDB Cloud platform. The recommended version is a recent Long Term Support version. The default is not guaranteed to be the most recently released version throughout the entire release cycle. For versions available in a specific project, see the linked documentation or use the API endpoint for [project LTS versions endpoint](#tag/Projects/operation/getProjectLTSVersions).\n\n On update: Increase version only by 1 major version at a time. If the cluster is pinned to a MongoDB feature compatibility version exactly one major version below the current MongoDB version, the MongoDB version can be downgraded to the previous major version.", + }, + "mongo_dbversion": schema.StringAttribute{ + Computed: true, + Description: "Version of MongoDB that the cluster runs.", + MarkdownDescription: "Version of MongoDB that the cluster runs.", + }, + "name": schema.StringAttribute{ + Computed: true, + Description: "Human-readable label that identifies the cluster.", + MarkdownDescription: "Human-readable label that identifies the cluster.", + }, + "paused": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the cluster is paused.", + MarkdownDescription: "Flag that indicates whether the cluster is paused.", + }, + "pit_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the cluster uses continuous cloud backups.", + MarkdownDescription: "Flag that indicates whether the cluster uses continuous cloud backups.", + }, + "redact_client_log_data": schema.BoolAttribute{ + Computed: true, + Description: "Enable or disable log redaction.\n\nThis setting configures the ``mongod`` or ``mongos`` to redact any document field contents from a message accompanying a given log event before logging. This prevents the program from writing potentially sensitive data stored on the database to the diagnostic log. Metadata such as error or operation codes, line numbers, and source file names are still visible in the logs.\n\nUse ``redactClientLogData`` in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements.\n\n*Note*: changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated.", + MarkdownDescription: "Enable or disable log redaction.\n\nThis setting configures the ``mongod`` or ``mongos`` to redact any document field contents from a message accompanying a given log event before logging. This prevents the program from writing potentially sensitive data stored on the database to the diagnostic log. 
Metadata such as error or operation codes, line numbers, and source file names are still visible in the logs.\n\nUse ``redactClientLogData`` in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements.\n\n*Note*: changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated.", + }, + "replica_set_scaling_strategy": schema.StringAttribute{ + Computed: true, + Description: "Set this field to configure the replica set scaling mode for your cluster.\n\nBy default, Atlas scales under WORKLOAD_TYPE. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes.\n\nWhen configured as SEQUENTIAL, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads.\n\nWhen configured as NODE_TYPE, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads.", + MarkdownDescription: "Set this field to configure the replica set scaling mode for your cluster.\n\nBy default, Atlas scales under WORKLOAD_TYPE. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes.\n\nWhen configured as SEQUENTIAL, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads.\n\nWhen configured as NODE_TYPE, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads.", + }, + "replication_specs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. If you include existing shard replication configurations in the request, you must specify this parameter. If you add a new shard to an existing Cluster, you may specify this parameter. The request deletes any existing shards in the Cluster that you exclude from the request. This corresponds to Shard ID displayed in the UI.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. If you include existing shard replication configurations in the request, you must specify this parameter. If you add a new shard to an existing Cluster, you may specify this parameter. The request deletes any existing shards in the Cluster that you exclude from the request. 
This corresponds to Shard ID displayed in the UI.", + }, + "region_configs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "analytics_auto_scaling": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + MarkdownDescription: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + }, + "max_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Maximum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Maximum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "min_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "scale_down_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + MarkdownDescription: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + }, + }, + CustomType: ComputeType{ + ObjectType: types.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles CPU scaling.", + MarkdownDescription: "Options that determine how this cluster handles CPU scaling.", + }, + "disk_gb": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether this cluster enables disk auto-scaling. 
The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + MarkdownDescription: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + }, + }, + CustomType: DiskGbType{ + ObjectType: types.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Setting that enables disk auto-scaling.", + MarkdownDescription: "Setting that enables disk auto-scaling.", + }, + }, + CustomType: AnalyticsAutoScalingType{ + ObjectType: types.ObjectType{ + AttrTypes: AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles resource scaling.", + MarkdownDescription: "Options that determine how this cluster handles resource scaling.", + }, + "analytics_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. 
\n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + }, + "instance_size": schema.StringAttribute{ + Computed: true, + Description: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + MarkdownDescription: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + }, + "node_count": schema.Int64Attribute{ + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: AnalyticsSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: AnalyticsSpecsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + MarkdownDescription: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + }, + "auto_scaling": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. 
If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + MarkdownDescription: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + }, + "max_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Maximum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`.", + MarkdownDescription: "Maximum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`.", + }, + "min_instance_size": schema.StringAttribute{ + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "scale_down_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + MarkdownDescription: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + }, + }, + CustomType: ComputeType{ + ObjectType: types.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles CPU scaling.", + MarkdownDescription: "Options that determine how this cluster handles CPU scaling.", + }, + "disk_gb": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + MarkdownDescription: "Flag that indicates whether this cluster enables disk auto-scaling. 
The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + }, + }, + CustomType: DiskGbType{ + ObjectType: types.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Setting that enables disk auto-scaling.", + MarkdownDescription: "Setting that enables disk auto-scaling.", + }, + }, + CustomType: AutoScalingType{ + ObjectType: types.ObjectType{ + AttrTypes: AutoScalingValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Options that determine how this cluster handles resource scaling.", + MarkdownDescription: "Options that determine how this cluster handles resource scaling.", + }, + "backing_provider_name": schema.StringAttribute{ + Computed: true, + Description: "Cloud service provider on which MongoDB Cloud provisioned the multi-tenant cluster. The resource returns this parameter when **providerName** is `TENANT` and **electableSpecs.instanceSize** is `M0`, `M2` or `M5`.", + MarkdownDescription: "Cloud service provider on which MongoDB Cloud provisioned the multi-tenant cluster. The resource returns this parameter when **providerName** is `TENANT` and **electableSpecs.instanceSize** is `M0`, `M2` or `M5`.", + }, + "electable_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. 
Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + }, + "instance_size": schema.StringAttribute{ + Computed: true, + Description: "Hardware specification for the instances in this M0/M2/M5 tier cluster.", + MarkdownDescription: "Hardware specification for the instances in this M0/M2/M5 tier cluster.", + }, + "node_count": schema.Int64Attribute{ + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: ElectableSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ElectableSpecsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Hardware specifications for all electable nodes deployed in the region. Electable nodes can become the primary and can enable local reads. If you don't specify this option, MongoDB Cloud deploys no electable nodes to the region.", + MarkdownDescription: "Hardware specifications for all electable nodes deployed in the region. Electable nodes can become the primary and can enable local reads. If you don't specify this option, MongoDB Cloud deploys no electable nodes to the region.", + }, + "priority": schema.Int64Attribute{ + Computed: true, + Description: "Precedence is given to this region when a primary election occurs. If your **regionConfigs** has only **readOnlySpecs**, **analyticsSpecs**, or both, set this value to `0`. If you have multiple **regionConfigs** objects (your cluster is multi-region or multi-cloud), they must have priorities in descending order. The highest priority is `7`.\n\n**Example:** If you have three regions, their priorities would be `7`, `6`, and `5` respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be `4` and `3` respectively.", + MarkdownDescription: "Precedence is given to this region when a primary election occurs. If your **regionConfigs** has only **readOnlySpecs**, **analyticsSpecs**, or both, set this value to `0`. 
If you have multiple **regionConfigs** objects (your cluster is multi-region or multi-cloud), they must have priorities in descending order. The highest priority is `7`.\n\n**Example:** If you have three regions, their priorities would be `7`, `6`, and `5` respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be `4` and `3` respectively.", + }, + "provider_name": schema.StringAttribute{ + Computed: true, + Description: "Cloud service provider on which MongoDB Cloud provisions the hosts. Set dedicated clusters to `AWS`, `GCP`, `AZURE` or `TENANT`.", + MarkdownDescription: "Cloud service provider on which MongoDB Cloud provisions the hosts. Set dedicated clusters to `AWS`, `GCP`, `AZURE` or `TENANT`.", + }, + "read_only_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. 
If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + }, + "instance_size": schema.StringAttribute{ + Computed: true, + Description: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + MarkdownDescription: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + }, + "node_count": schema.Int64Attribute{ + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: ReadOnlySpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ReadOnlySpecsValue{}.AttributeTypes(ctx), + }, + }, + Computed: true, + Description: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + MarkdownDescription: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + }, + "region_name": schema.StringAttribute{ + Computed: true, + Description: "Physical location of your MongoDB cluster nodes. The region you choose can affect network latency for clients accessing your databases. The region name is only returned in the response for single-region clusters. When MongoDB Cloud deploys a dedicated cluster, it checks if a VPC or VPC connection exists for that provider and region. If not, MongoDB Cloud creates them as part of the deployment. It assigns the VPC a Classless Inter-Domain Routing (CIDR) block. To limit a new VPC peering connection to one Classless Inter-Domain Routing (CIDR) block and region, create the connection first. Deploy the cluster after the connection starts. 
GCP Clusters and Multi-region clusters require one VPC peering connection for each region. MongoDB nodes can use only the peering connection that resides in the same region as the nodes to communicate with the peered VPC.", + MarkdownDescription: "Physical location of your MongoDB cluster nodes. The region you choose can affect network latency for clients accessing your databases. The region name is only returned in the response for single-region clusters. When MongoDB Cloud deploys a dedicated cluster, it checks if a VPC or VPC connection exists for that provider and region. If not, MongoDB Cloud creates them as part of the deployment. It assigns the VPC a Classless Inter-Domain Routing (CIDR) block. To limit a new VPC peering connection to one Classless Inter-Domain Routing (CIDR) block and region, create the connection first. Deploy the cluster after the connection starts. GCP Clusters and Multi-region clusters require one VPC peering connection for each region. MongoDB nodes can use only the peering connection that resides in the same region as the nodes to communicate with the peered VPC.", + }, + }, + CustomType: RegionConfigsType{ + ObjectType: types.ObjectType{ + AttrTypes: RegionConfigsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "Hardware specifications for nodes set for a given region. Each **regionConfigs** object describes the region's priority in elections and the number and type of MongoDB nodes that MongoDB Cloud deploys to the region. Each **regionConfigs** object must have either an **analyticsSpecs** object, **electableSpecs** object, or **readOnlySpecs** object. Tenant clusters only require **electableSpecs. Dedicated** clusters can specify any of these specifications, but must have at least one **electableSpecs** object within a **replicationSpec**.\n\n**Example:**\n\nIf you set `\"replicationSpecs[n].regionConfigs[m].analyticsSpecs.instanceSize\" : \"M30\"`, set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : `\"M30\"` if you have electable nodes and `\"replicationSpecs[n].regionConfigs[m].readOnlySpecs.instanceSize\" : `\"M30\"` if you have read-only nodes.", + MarkdownDescription: "Hardware specifications for nodes set for a given region. Each **regionConfigs** object describes the region's priority in elections and the number and type of MongoDB nodes that MongoDB Cloud deploys to the region. Each **regionConfigs** object must have either an **analyticsSpecs** object, **electableSpecs** object, or **readOnlySpecs** object. Tenant clusters only require **electableSpecs. Dedicated** clusters can specify any of these specifications, but must have at least one **electableSpecs** object within a **replicationSpec**.\n\n**Example:**\n\nIf you set `\"replicationSpecs[n].regionConfigs[m].analyticsSpecs.instanceSize\" : \"M30\"`, set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : `\"M30\"` if you have electable nodes and `\"replicationSpecs[n].regionConfigs[m].readOnlySpecs.instanceSize\" : `\"M30\"` if you have read-only nodes.", + }, + "zone_id": schema.StringAttribute{ + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. This value can be used to configure Global Cluster backup policies.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. 
This value can be used to configure Global Cluster backup policies.", + }, + "zone_name": schema.StringAttribute{ + Computed: true, + Description: "Human-readable label that describes the zone this shard belongs to in a Global Cluster. Provide this value only if \"clusterType\" : \"GEOSHARDED\" but not \"selfManagedSharding\" : true.", + MarkdownDescription: "Human-readable label that describes the zone this shard belongs to in a Global Cluster. Provide this value only if \"clusterType\" : \"GEOSHARDED\" but not \"selfManagedSharding\" : true.", + }, + }, + CustomType: ReplicationSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ReplicationSpecsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of settings that configure your cluster regions. This array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations.", + MarkdownDescription: "List of settings that configure your cluster regions. This array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations.", + }, + "root_cert_type": schema.StringAttribute{ + Computed: true, + Description: "Root Certificate Authority that MongoDB Cloud cluster uses. MongoDB Cloud supports Internet Security Research Group.", + MarkdownDescription: "Root Certificate Authority that MongoDB Cloud cluster uses. MongoDB Cloud supports Internet Security Research Group.", + }, + "state_name": schema.StringAttribute{ + Computed: true, + Description: "Human-readable label that indicates the current operating condition of this cluster.", + MarkdownDescription: "Human-readable label that indicates the current operating condition of this cluster.", + }, + "tags": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Computed: true, + Description: "Constant that defines the set of the tag. For example, `environment` in the `environment : production` tag.", + MarkdownDescription: "Constant that defines the set of the tag. For example, `environment` in the `environment : production` tag.", + }, + "value": schema.StringAttribute{ + Computed: true, + Description: "Variable that belongs to the set of the tag. For example, `production` in the `environment : production` tag.", + MarkdownDescription: "Variable that belongs to the set of the tag. For example, `production` in the `environment : production` tag.", + }, + }, + CustomType: TagsType{ + ObjectType: types.ObjectType{ + AttrTypes: TagsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster.", + MarkdownDescription: "List that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster.", + }, + "termination_protection_enabled": schema.BoolAttribute{ + Computed: true, + Description: "Flag that indicates whether termination protection is enabled on the cluster. If set to `true`, MongoDB Cloud won't delete the cluster. If set to `false`, MongoDB Cloud will delete the cluster.", + MarkdownDescription: "Flag that indicates whether termination protection is enabled on the cluster. If set to `true`, MongoDB Cloud won't delete the cluster. 
If set to `false`, MongoDB Cloud will delete the cluster.", + }, + "version_release_system": schema.StringAttribute{ + Computed: true, + Description: "Method by which the cluster maintains the MongoDB versions. If value is `CONTINUOUS`, you must not specify **mongoDBMajorVersion**.", + MarkdownDescription: "Method by which the cluster maintains the MongoDB versions. If value is `CONTINUOUS`, you must not specify **mongoDBMajorVersion**.", + }, + }, + CustomType: ResultsType{ + ObjectType: types.ObjectType{ + AttrTypes: ResultsValue{}.AttributeTypes(ctx), + }, + }, + }, + Computed: true, + Description: "List of returned documents that MongoDB Cloud provides when completing this request.", + MarkdownDescription: "List of returned documents that MongoDB Cloud provides when completing this request.", + }, + "total_count": schema.Int64Attribute{ + Computed: true, + Description: "Total number of documents available. MongoDB Cloud omits this value if `includeCount` is set to `false`.", + MarkdownDescription: "Total number of documents available. MongoDB Cloud omits this value if `includeCount` is set to `false`.", + }, + }, + } +} + +type AdvancedClustersModel struct { + Links types.List `tfsdk:"links"` + Results types.List `tfsdk:"results"` + GroupId types.String `tfsdk:"group_id"` + ItemsPerPage types.Int64 `tfsdk:"items_per_page"` + PageNum types.Int64 `tfsdk:"page_num"` + TotalCount types.Int64 `tfsdk:"total_count"` + IncludeCount types.Bool `tfsdk:"include_count"` + IncludeDeletedWithRetainedBackups types.Bool `tfsdk:"include_deleted_with_retained_backups"` +} + +var _ basetypes.ObjectTypable = ResultsType{} + +type ResultsType struct { + basetypes.ObjectType +} + +func (t ResultsType) Equal(o attr.Type) bool { + other, ok := o.(ResultsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ResultsType) String() string { + return "ResultsType" +} + +func (t ResultsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + acceptDataRisksAndForceReplicaSetReconfigAttribute, ok := attributes["accept_data_risks_and_force_replica_set_reconfig"] + + if !ok { + diags.AddError( + "Attribute Missing", + `accept_data_risks_and_force_replica_set_reconfig is missing from object`) + + return nil, diags + } + + acceptDataRisksAndForceReplicaSetReconfigVal, ok := acceptDataRisksAndForceReplicaSetReconfigAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`accept_data_risks_and_force_replica_set_reconfig expected to be basetypes.StringValue, was: %T`, acceptDataRisksAndForceReplicaSetReconfigAttribute)) + } + + backupEnabledAttribute, ok := attributes["backup_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `backup_enabled is missing from object`) + + return nil, diags + } + + backupEnabledVal, ok := backupEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`backup_enabled expected to be basetypes.BoolValue, was: %T`, backupEnabledAttribute)) + } + + biConnectorAttribute, ok := attributes["bi_connector"] + + if !ok { + diags.AddError( + "Attribute Missing", + `bi_connector is missing from object`) + + return nil, diags + } + + biConnectorVal, ok := biConnectorAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`bi_connector expected to be basetypes.ObjectValue, 
was: %T`, biConnectorAttribute)) + } + + clusterTypeAttribute, ok := attributes["cluster_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cluster_type is missing from object`) + + return nil, diags + } + + clusterTypeVal, ok := clusterTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cluster_type expected to be basetypes.StringValue, was: %T`, clusterTypeAttribute)) + } + + configServerManagementModeAttribute, ok := attributes["config_server_management_mode"] + + if !ok { + diags.AddError( + "Attribute Missing", + `config_server_management_mode is missing from object`) + + return nil, diags + } + + configServerManagementModeVal, ok := configServerManagementModeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`config_server_management_mode expected to be basetypes.StringValue, was: %T`, configServerManagementModeAttribute)) + } + + configServerTypeAttribute, ok := attributes["config_server_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `config_server_type is missing from object`) + + return nil, diags + } + + configServerTypeVal, ok := configServerTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`config_server_type expected to be basetypes.StringValue, was: %T`, configServerTypeAttribute)) + } + + connectionStringsAttribute, ok := attributes["connection_strings"] + + if !ok { + diags.AddError( + "Attribute Missing", + `connection_strings is missing from object`) + + return nil, diags + } + + connectionStringsVal, ok := connectionStringsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`connection_strings expected to be basetypes.ObjectValue, was: %T`, connectionStringsAttribute)) + } + + createDateAttribute, ok := attributes["create_date"] + + if !ok { + diags.AddError( + "Attribute Missing", + `create_date is missing from object`) + + return nil, diags + } + + createDateVal, ok := createDateAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`create_date expected to be basetypes.StringValue, was: %T`, createDateAttribute)) + } + + diskWarmingModeAttribute, ok := attributes["disk_warming_mode"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_warming_mode is missing from object`) + + return nil, diags + } + + diskWarmingModeVal, ok := diskWarmingModeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_warming_mode expected to be basetypes.StringValue, was: %T`, diskWarmingModeAttribute)) + } + + encryptionAtRestProviderAttribute, ok := attributes["encryption_at_rest_provider"] + + if !ok { + diags.AddError( + "Attribute Missing", + `encryption_at_rest_provider is missing from object`) + + return nil, diags + } + + encryptionAtRestProviderVal, ok := encryptionAtRestProviderAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`encryption_at_rest_provider expected to be basetypes.StringValue, was: %T`, encryptionAtRestProviderAttribute)) + } + + featureCompatibilityVersionAttribute, ok := attributes["feature_compatibility_version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `feature_compatibility_version is missing from object`) + + return nil, diags + } + + featureCompatibilityVersionVal, ok := 
featureCompatibilityVersionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`feature_compatibility_version expected to be basetypes.StringValue, was: %T`, featureCompatibilityVersionAttribute)) + } + + featureCompatibilityVersionExpirationDateAttribute, ok := attributes["feature_compatibility_version_expiration_date"] + + if !ok { + diags.AddError( + "Attribute Missing", + `feature_compatibility_version_expiration_date is missing from object`) + + return nil, diags + } + + featureCompatibilityVersionExpirationDateVal, ok := featureCompatibilityVersionExpirationDateAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`feature_compatibility_version_expiration_date expected to be basetypes.StringValue, was: %T`, featureCompatibilityVersionExpirationDateAttribute)) + } + + globalClusterSelfManagedShardingAttribute, ok := attributes["global_cluster_self_managed_sharding"] + + if !ok { + diags.AddError( + "Attribute Missing", + `global_cluster_self_managed_sharding is missing from object`) + + return nil, diags + } + + globalClusterSelfManagedShardingVal, ok := globalClusterSelfManagedShardingAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`global_cluster_self_managed_sharding expected to be basetypes.BoolValue, was: %T`, globalClusterSelfManagedShardingAttribute)) + } + + groupIdAttribute, ok := attributes["group_id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `group_id is missing from object`) + + return nil, diags + } + + groupIdVal, ok := groupIdAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`group_id expected to be basetypes.StringValue, was: %T`, groupIdAttribute)) + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return nil, diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + labelsAttribute, ok := attributes["labels"] + + if !ok { + diags.AddError( + "Attribute Missing", + `labels is missing from object`) + + return nil, diags + } + + labelsVal, ok := labelsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`labels expected to be basetypes.ListValue, was: %T`, labelsAttribute)) + } + + linksAttribute, ok := attributes["links"] + + if !ok { + diags.AddError( + "Attribute Missing", + `links is missing from object`) + + return nil, diags + } + + linksVal, ok := linksAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`links expected to be basetypes.ListValue, was: %T`, linksAttribute)) + } + + mongoDbemployeeAccessGrantAttribute, ok := attributes["mongo_dbemployee_access_grant"] + + if !ok { + diags.AddError( + "Attribute Missing", + `mongo_dbemployee_access_grant is missing from object`) + + return nil, diags + } + + mongoDbemployeeAccessGrantVal, ok := mongoDbemployeeAccessGrantAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`mongo_dbemployee_access_grant expected to be basetypes.ObjectValue, was: %T`, mongoDbemployeeAccessGrantAttribute)) + } + + mongoDbmajorVersionAttribute, ok := attributes["mongo_dbmajor_version"] + + if !ok { + diags.AddError( + "Attribute 
Missing", + `mongo_dbmajor_version is missing from object`) + + return nil, diags + } + + mongoDbmajorVersionVal, ok := mongoDbmajorVersionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`mongo_dbmajor_version expected to be basetypes.StringValue, was: %T`, mongoDbmajorVersionAttribute)) + } + + mongoDbversionAttribute, ok := attributes["mongo_dbversion"] + + if !ok { + diags.AddError( + "Attribute Missing", + `mongo_dbversion is missing from object`) + + return nil, diags + } + + mongoDbversionVal, ok := mongoDbversionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`mongo_dbversion expected to be basetypes.StringValue, was: %T`, mongoDbversionAttribute)) + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return nil, diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + pausedAttribute, ok := attributes["paused"] + + if !ok { + diags.AddError( + "Attribute Missing", + `paused is missing from object`) + + return nil, diags + } + + pausedVal, ok := pausedAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`paused expected to be basetypes.BoolValue, was: %T`, pausedAttribute)) + } + + pitEnabledAttribute, ok := attributes["pit_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `pit_enabled is missing from object`) + + return nil, diags + } + + pitEnabledVal, ok := pitEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`pit_enabled expected to be basetypes.BoolValue, was: %T`, pitEnabledAttribute)) + } + + redactClientLogDataAttribute, ok := attributes["redact_client_log_data"] + + if !ok { + diags.AddError( + "Attribute Missing", + `redact_client_log_data is missing from object`) + + return nil, diags + } + + redactClientLogDataVal, ok := redactClientLogDataAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`redact_client_log_data expected to be basetypes.BoolValue, was: %T`, redactClientLogDataAttribute)) + } + + replicaSetScalingStrategyAttribute, ok := attributes["replica_set_scaling_strategy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `replica_set_scaling_strategy is missing from object`) + + return nil, diags + } + + replicaSetScalingStrategyVal, ok := replicaSetScalingStrategyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`replica_set_scaling_strategy expected to be basetypes.StringValue, was: %T`, replicaSetScalingStrategyAttribute)) + } + + replicationSpecsAttribute, ok := attributes["replication_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `replication_specs is missing from object`) + + return nil, diags + } + + replicationSpecsVal, ok := replicationSpecsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`replication_specs expected to be basetypes.ListValue, was: %T`, replicationSpecsAttribute)) + } + + rootCertTypeAttribute, ok := attributes["root_cert_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `root_cert_type is missing from object`) + + return nil, diags + } + + rootCertTypeVal, ok := 
rootCertTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`root_cert_type expected to be basetypes.StringValue, was: %T`, rootCertTypeAttribute)) + } + + stateNameAttribute, ok := attributes["state_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `state_name is missing from object`) + + return nil, diags + } + + stateNameVal, ok := stateNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`state_name expected to be basetypes.StringValue, was: %T`, stateNameAttribute)) + } + + tagsAttribute, ok := attributes["tags"] + + if !ok { + diags.AddError( + "Attribute Missing", + `tags is missing from object`) + + return nil, diags + } + + tagsVal, ok := tagsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`tags expected to be basetypes.ListValue, was: %T`, tagsAttribute)) + } + + terminationProtectionEnabledAttribute, ok := attributes["termination_protection_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `termination_protection_enabled is missing from object`) + + return nil, diags + } + + terminationProtectionEnabledVal, ok := terminationProtectionEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`termination_protection_enabled expected to be basetypes.BoolValue, was: %T`, terminationProtectionEnabledAttribute)) + } + + versionReleaseSystemAttribute, ok := attributes["version_release_system"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version_release_system is missing from object`) + + return nil, diags + } + + versionReleaseSystemVal, ok := versionReleaseSystemAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version_release_system expected to be basetypes.StringValue, was: %T`, versionReleaseSystemAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ResultsValue{ + AcceptDataRisksAndForceReplicaSetReconfig: acceptDataRisksAndForceReplicaSetReconfigVal, + BackupEnabled: backupEnabledVal, + BiConnector: biConnectorVal, + ClusterType: clusterTypeVal, + ConfigServerManagementMode: configServerManagementModeVal, + ConfigServerType: configServerTypeVal, + ConnectionStrings: connectionStringsVal, + CreateDate: createDateVal, + DiskWarmingMode: diskWarmingModeVal, + EncryptionAtRestProvider: encryptionAtRestProviderVal, + FeatureCompatibilityVersion: featureCompatibilityVersionVal, + FeatureCompatibilityVersionExpirationDate: featureCompatibilityVersionExpirationDateVal, + GlobalClusterSelfManagedSharding: globalClusterSelfManagedShardingVal, + GroupId: groupIdVal, + Id: idVal, + Labels: labelsVal, + Links: linksVal, + MongoDbemployeeAccessGrant: mongoDbemployeeAccessGrantVal, + MongoDbmajorVersion: mongoDbmajorVersionVal, + MongoDbversion: mongoDbversionVal, + Name: nameVal, + Paused: pausedVal, + PitEnabled: pitEnabledVal, + RedactClientLogData: redactClientLogDataVal, + ReplicaSetScalingStrategy: replicaSetScalingStrategyVal, + ReplicationSpecs: replicationSpecsVal, + RootCertType: rootCertTypeVal, + StateName: stateNameVal, + Tags: tagsVal, + TerminationProtectionEnabled: terminationProtectionEnabledVal, + VersionReleaseSystem: versionReleaseSystemVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewResultsValueNull() ResultsValue { + return ResultsValue{ + state: attr.ValueStateNull, + } +} + +func NewResultsValueUnknown() ResultsValue { + 
return ResultsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewResultsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ResultsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ResultsValue Attribute Value", + "While creating a ResultsValue value, a missing attribute value was detected. "+ + "A ResultsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ResultsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ResultsValue Attribute Type", + "While creating a ResultsValue value, an invalid attribute value was detected. "+ + "A ResultsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ResultsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ResultsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ResultsValue Attribute Value", + "While creating a ResultsValue value, an extra attribute value was detected. "+ + "A ResultsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ResultsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewResultsValueUnknown(), diags + } + + acceptDataRisksAndForceReplicaSetReconfigAttribute, ok := attributes["accept_data_risks_and_force_replica_set_reconfig"] + + if !ok { + diags.AddError( + "Attribute Missing", + `accept_data_risks_and_force_replica_set_reconfig is missing from object`) + + return NewResultsValueUnknown(), diags + } + + acceptDataRisksAndForceReplicaSetReconfigVal, ok := acceptDataRisksAndForceReplicaSetReconfigAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`accept_data_risks_and_force_replica_set_reconfig expected to be basetypes.StringValue, was: %T`, acceptDataRisksAndForceReplicaSetReconfigAttribute)) + } + + backupEnabledAttribute, ok := attributes["backup_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `backup_enabled is missing from object`) + + return NewResultsValueUnknown(), diags + } + + backupEnabledVal, ok := backupEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`backup_enabled expected to be basetypes.BoolValue, was: %T`, backupEnabledAttribute)) + } + + biConnectorAttribute, ok := attributes["bi_connector"] + + if !ok { + diags.AddError( + "Attribute Missing", + `bi_connector is missing from object`) + + return NewResultsValueUnknown(), diags + } + + biConnectorVal, ok := biConnectorAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`bi_connector expected to be basetypes.ObjectValue, was: %T`, biConnectorAttribute)) + } + + 
clusterTypeAttribute, ok := attributes["cluster_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `cluster_type is missing from object`) + + return NewResultsValueUnknown(), diags + } + + clusterTypeVal, ok := clusterTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`cluster_type expected to be basetypes.StringValue, was: %T`, clusterTypeAttribute)) + } + + configServerManagementModeAttribute, ok := attributes["config_server_management_mode"] + + if !ok { + diags.AddError( + "Attribute Missing", + `config_server_management_mode is missing from object`) + + return NewResultsValueUnknown(), diags + } + + configServerManagementModeVal, ok := configServerManagementModeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`config_server_management_mode expected to be basetypes.StringValue, was: %T`, configServerManagementModeAttribute)) + } + + configServerTypeAttribute, ok := attributes["config_server_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `config_server_type is missing from object`) + + return NewResultsValueUnknown(), diags + } + + configServerTypeVal, ok := configServerTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`config_server_type expected to be basetypes.StringValue, was: %T`, configServerTypeAttribute)) + } + + connectionStringsAttribute, ok := attributes["connection_strings"] + + if !ok { + diags.AddError( + "Attribute Missing", + `connection_strings is missing from object`) + + return NewResultsValueUnknown(), diags + } + + connectionStringsVal, ok := connectionStringsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`connection_strings expected to be basetypes.ObjectValue, was: %T`, connectionStringsAttribute)) + } + + createDateAttribute, ok := attributes["create_date"] + + if !ok { + diags.AddError( + "Attribute Missing", + `create_date is missing from object`) + + return NewResultsValueUnknown(), diags + } + + createDateVal, ok := createDateAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`create_date expected to be basetypes.StringValue, was: %T`, createDateAttribute)) + } + + diskWarmingModeAttribute, ok := attributes["disk_warming_mode"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_warming_mode is missing from object`) + + return NewResultsValueUnknown(), diags + } + + diskWarmingModeVal, ok := diskWarmingModeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_warming_mode expected to be basetypes.StringValue, was: %T`, diskWarmingModeAttribute)) + } + + encryptionAtRestProviderAttribute, ok := attributes["encryption_at_rest_provider"] + + if !ok { + diags.AddError( + "Attribute Missing", + `encryption_at_rest_provider is missing from object`) + + return NewResultsValueUnknown(), diags + } + + encryptionAtRestProviderVal, ok := encryptionAtRestProviderAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`encryption_at_rest_provider expected to be basetypes.StringValue, was: %T`, encryptionAtRestProviderAttribute)) + } + + featureCompatibilityVersionAttribute, ok := attributes["feature_compatibility_version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `feature_compatibility_version is missing from object`) + + return 
NewResultsValueUnknown(), diags + } + + featureCompatibilityVersionVal, ok := featureCompatibilityVersionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`feature_compatibility_version expected to be basetypes.StringValue, was: %T`, featureCompatibilityVersionAttribute)) + } + + featureCompatibilityVersionExpirationDateAttribute, ok := attributes["feature_compatibility_version_expiration_date"] + + if !ok { + diags.AddError( + "Attribute Missing", + `feature_compatibility_version_expiration_date is missing from object`) + + return NewResultsValueUnknown(), diags + } + + featureCompatibilityVersionExpirationDateVal, ok := featureCompatibilityVersionExpirationDateAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`feature_compatibility_version_expiration_date expected to be basetypes.StringValue, was: %T`, featureCompatibilityVersionExpirationDateAttribute)) + } + + globalClusterSelfManagedShardingAttribute, ok := attributes["global_cluster_self_managed_sharding"] + + if !ok { + diags.AddError( + "Attribute Missing", + `global_cluster_self_managed_sharding is missing from object`) + + return NewResultsValueUnknown(), diags + } + + globalClusterSelfManagedShardingVal, ok := globalClusterSelfManagedShardingAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`global_cluster_self_managed_sharding expected to be basetypes.BoolValue, was: %T`, globalClusterSelfManagedShardingAttribute)) + } + + groupIdAttribute, ok := attributes["group_id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `group_id is missing from object`) + + return NewResultsValueUnknown(), diags + } + + groupIdVal, ok := groupIdAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`group_id expected to be basetypes.StringValue, was: %T`, groupIdAttribute)) + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return NewResultsValueUnknown(), diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + labelsAttribute, ok := attributes["labels"] + + if !ok { + diags.AddError( + "Attribute Missing", + `labels is missing from object`) + + return NewResultsValueUnknown(), diags + } + + labelsVal, ok := labelsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`labels expected to be basetypes.ListValue, was: %T`, labelsAttribute)) + } + + linksAttribute, ok := attributes["links"] + + if !ok { + diags.AddError( + "Attribute Missing", + `links is missing from object`) + + return NewResultsValueUnknown(), diags + } + + linksVal, ok := linksAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`links expected to be basetypes.ListValue, was: %T`, linksAttribute)) + } + + mongoDbemployeeAccessGrantAttribute, ok := attributes["mongo_dbemployee_access_grant"] + + if !ok { + diags.AddError( + "Attribute Missing", + `mongo_dbemployee_access_grant is missing from object`) + + return NewResultsValueUnknown(), diags + } + + mongoDbemployeeAccessGrantVal, ok := mongoDbemployeeAccessGrantAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + 
fmt.Sprintf(`mongo_dbemployee_access_grant expected to be basetypes.ObjectValue, was: %T`, mongoDbemployeeAccessGrantAttribute)) + } + + mongoDbmajorVersionAttribute, ok := attributes["mongo_dbmajor_version"] + + if !ok { + diags.AddError( + "Attribute Missing", + `mongo_dbmajor_version is missing from object`) + + return NewResultsValueUnknown(), diags + } + + mongoDbmajorVersionVal, ok := mongoDbmajorVersionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`mongo_dbmajor_version expected to be basetypes.StringValue, was: %T`, mongoDbmajorVersionAttribute)) + } + + mongoDbversionAttribute, ok := attributes["mongo_dbversion"] + + if !ok { + diags.AddError( + "Attribute Missing", + `mongo_dbversion is missing from object`) + + return NewResultsValueUnknown(), diags + } + + mongoDbversionVal, ok := mongoDbversionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`mongo_dbversion expected to be basetypes.StringValue, was: %T`, mongoDbversionAttribute)) + } + + nameAttribute, ok := attributes["name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `name is missing from object`) + + return NewResultsValueUnknown(), diags + } + + nameVal, ok := nameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`name expected to be basetypes.StringValue, was: %T`, nameAttribute)) + } + + pausedAttribute, ok := attributes["paused"] + + if !ok { + diags.AddError( + "Attribute Missing", + `paused is missing from object`) + + return NewResultsValueUnknown(), diags + } + + pausedVal, ok := pausedAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`paused expected to be basetypes.BoolValue, was: %T`, pausedAttribute)) + } + + pitEnabledAttribute, ok := attributes["pit_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `pit_enabled is missing from object`) + + return NewResultsValueUnknown(), diags + } + + pitEnabledVal, ok := pitEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`pit_enabled expected to be basetypes.BoolValue, was: %T`, pitEnabledAttribute)) + } + + redactClientLogDataAttribute, ok := attributes["redact_client_log_data"] + + if !ok { + diags.AddError( + "Attribute Missing", + `redact_client_log_data is missing from object`) + + return NewResultsValueUnknown(), diags + } + + redactClientLogDataVal, ok := redactClientLogDataAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`redact_client_log_data expected to be basetypes.BoolValue, was: %T`, redactClientLogDataAttribute)) + } + + replicaSetScalingStrategyAttribute, ok := attributes["replica_set_scaling_strategy"] + + if !ok { + diags.AddError( + "Attribute Missing", + `replica_set_scaling_strategy is missing from object`) + + return NewResultsValueUnknown(), diags + } + + replicaSetScalingStrategyVal, ok := replicaSetScalingStrategyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`replica_set_scaling_strategy expected to be basetypes.StringValue, was: %T`, replicaSetScalingStrategyAttribute)) + } + + replicationSpecsAttribute, ok := attributes["replication_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `replication_specs is missing from object`) + + return NewResultsValueUnknown(), diags + } + + replicationSpecsVal, ok := 
replicationSpecsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`replication_specs expected to be basetypes.ListValue, was: %T`, replicationSpecsAttribute)) + } + + rootCertTypeAttribute, ok := attributes["root_cert_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `root_cert_type is missing from object`) + + return NewResultsValueUnknown(), diags + } + + rootCertTypeVal, ok := rootCertTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`root_cert_type expected to be basetypes.StringValue, was: %T`, rootCertTypeAttribute)) + } + + stateNameAttribute, ok := attributes["state_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `state_name is missing from object`) + + return NewResultsValueUnknown(), diags + } + + stateNameVal, ok := stateNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`state_name expected to be basetypes.StringValue, was: %T`, stateNameAttribute)) + } + + tagsAttribute, ok := attributes["tags"] + + if !ok { + diags.AddError( + "Attribute Missing", + `tags is missing from object`) + + return NewResultsValueUnknown(), diags + } + + tagsVal, ok := tagsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`tags expected to be basetypes.ListValue, was: %T`, tagsAttribute)) + } + + terminationProtectionEnabledAttribute, ok := attributes["termination_protection_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `termination_protection_enabled is missing from object`) + + return NewResultsValueUnknown(), diags + } + + terminationProtectionEnabledVal, ok := terminationProtectionEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`termination_protection_enabled expected to be basetypes.BoolValue, was: %T`, terminationProtectionEnabledAttribute)) + } + + versionReleaseSystemAttribute, ok := attributes["version_release_system"] + + if !ok { + diags.AddError( + "Attribute Missing", + `version_release_system is missing from object`) + + return NewResultsValueUnknown(), diags + } + + versionReleaseSystemVal, ok := versionReleaseSystemAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`version_release_system expected to be basetypes.StringValue, was: %T`, versionReleaseSystemAttribute)) + } + + if diags.HasError() { + return NewResultsValueUnknown(), diags + } + + return ResultsValue{ + AcceptDataRisksAndForceReplicaSetReconfig: acceptDataRisksAndForceReplicaSetReconfigVal, + BackupEnabled: backupEnabledVal, + BiConnector: biConnectorVal, + ClusterType: clusterTypeVal, + ConfigServerManagementMode: configServerManagementModeVal, + ConfigServerType: configServerTypeVal, + ConnectionStrings: connectionStringsVal, + CreateDate: createDateVal, + DiskWarmingMode: diskWarmingModeVal, + EncryptionAtRestProvider: encryptionAtRestProviderVal, + FeatureCompatibilityVersion: featureCompatibilityVersionVal, + FeatureCompatibilityVersionExpirationDate: featureCompatibilityVersionExpirationDateVal, + GlobalClusterSelfManagedSharding: globalClusterSelfManagedShardingVal, + GroupId: groupIdVal, + Id: idVal, + Labels: labelsVal, + Links: linksVal, + MongoDbemployeeAccessGrant: mongoDbemployeeAccessGrantVal, + MongoDbmajorVersion: mongoDbmajorVersionVal, + MongoDbversion: mongoDbversionVal, + Name: nameVal, + Paused: pausedVal, + PitEnabled: 
pitEnabledVal, + RedactClientLogData: redactClientLogDataVal, + ReplicaSetScalingStrategy: replicaSetScalingStrategyVal, + ReplicationSpecs: replicationSpecsVal, + RootCertType: rootCertTypeVal, + StateName: stateNameVal, + Tags: tagsVal, + TerminationProtectionEnabled: terminationProtectionEnabledVal, + VersionReleaseSystem: versionReleaseSystemVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewResultsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ResultsValue { + object, diags := NewResultsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewResultsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ResultsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewResultsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewResultsValueUnknown(), nil + } + + if in.IsNull() { + return NewResultsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewResultsValueMust(ResultsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ResultsType) ValueType(ctx context.Context) attr.Value { + return ResultsValue{} +} + +var _ basetypes.ObjectValuable = ResultsValue{} + +type ResultsValue struct { + Labels basetypes.ListValue `tfsdk:"labels"` + Tags basetypes.ListValue `tfsdk:"tags"` + ReplicationSpecs basetypes.ListValue `tfsdk:"replication_specs"` + Links basetypes.ListValue `tfsdk:"links"` + DiskWarmingMode basetypes.StringValue `tfsdk:"disk_warming_mode"` + MongoDbemployeeAccessGrant basetypes.ObjectValue `tfsdk:"mongo_dbemployee_access_grant"` + ConnectionStrings basetypes.ObjectValue `tfsdk:"connection_strings"` + CreateDate basetypes.StringValue `tfsdk:"create_date"` + AcceptDataRisksAndForceReplicaSetReconfig basetypes.StringValue `tfsdk:"accept_data_risks_and_force_replica_set_reconfig"` + EncryptionAtRestProvider basetypes.StringValue `tfsdk:"encryption_at_rest_provider"` + FeatureCompatibilityVersion basetypes.StringValue `tfsdk:"feature_compatibility_version"` + FeatureCompatibilityVersionExpirationDate basetypes.StringValue `tfsdk:"feature_compatibility_version_expiration_date"` + VersionReleaseSystem basetypes.StringValue `tfsdk:"version_release_system"` + GroupId basetypes.StringValue `tfsdk:"group_id"` + Id basetypes.StringValue `tfsdk:"id"` + ConfigServerManagementMode basetypes.StringValue `tfsdk:"config_server_management_mode"` + ClusterType basetypes.StringValue `tfsdk:"cluster_type"` + ConfigServerType basetypes.StringValue `tfsdk:"config_server_type"` + MongoDbmajorVersion basetypes.StringValue `tfsdk:"mongo_dbmajor_version"` + MongoDbversion basetypes.StringValue `tfsdk:"mongo_dbversion"` + Name basetypes.StringValue `tfsdk:"name"` + StateName basetypes.StringValue `tfsdk:"state_name"` + RootCertType 
basetypes.StringValue `tfsdk:"root_cert_type"` + BiConnector basetypes.ObjectValue `tfsdk:"bi_connector"` + ReplicaSetScalingStrategy basetypes.StringValue `tfsdk:"replica_set_scaling_strategy"` + RedactClientLogData basetypes.BoolValue `tfsdk:"redact_client_log_data"` + PitEnabled basetypes.BoolValue `tfsdk:"pit_enabled"` + Paused basetypes.BoolValue `tfsdk:"paused"` + BackupEnabled basetypes.BoolValue `tfsdk:"backup_enabled"` + TerminationProtectionEnabled basetypes.BoolValue `tfsdk:"termination_protection_enabled"` + GlobalClusterSelfManagedSharding basetypes.BoolValue `tfsdk:"global_cluster_self_managed_sharding"` + state attr.ValueState +} + +func (v ResultsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 31) + + var val tftypes.Value + var err error + + attrTypes["accept_data_risks_and_force_replica_set_reconfig"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["backup_enabled"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["bi_connector"] = basetypes.ObjectType{ + AttrTypes: BiConnectorValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["cluster_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["config_server_management_mode"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["config_server_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["connection_strings"] = basetypes.ObjectType{ + AttrTypes: ConnectionStringsValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["create_date"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["disk_warming_mode"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["encryption_at_rest_provider"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["feature_compatibility_version"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["feature_compatibility_version_expiration_date"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["global_cluster_self_managed_sharding"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["group_id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["labels"] = basetypes.ListType{ + ElemType: LabelsValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["links"] = basetypes.ListType{ + ElemType: LinksValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["mongo_dbemployee_access_grant"] = basetypes.ObjectType{ + AttrTypes: MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["mongo_dbmajor_version"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["mongo_dbversion"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["paused"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["pit_enabled"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["redact_client_log_data"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["replica_set_scaling_strategy"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["replication_specs"] = basetypes.ListType{ + ElemType: ReplicationSpecsValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["root_cert_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["state_name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["tags"] = basetypes.ListType{ + ElemType: TagsValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["termination_protection_enabled"] = basetypes.BoolType{}.TerraformType(ctx) + 
attrTypes["version_release_system"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 31) + + val, err = v.AcceptDataRisksAndForceReplicaSetReconfig.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["accept_data_risks_and_force_replica_set_reconfig"] = val + + val, err = v.BackupEnabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["backup_enabled"] = val + + val, err = v.BiConnector.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["bi_connector"] = val + + val, err = v.ClusterType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["cluster_type"] = val + + val, err = v.ConfigServerManagementMode.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["config_server_management_mode"] = val + + val, err = v.ConfigServerType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["config_server_type"] = val + + val, err = v.ConnectionStrings.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["connection_strings"] = val + + val, err = v.CreateDate.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["create_date"] = val + + val, err = v.DiskWarmingMode.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_warming_mode"] = val + + val, err = v.EncryptionAtRestProvider.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["encryption_at_rest_provider"] = val + + val, err = v.FeatureCompatibilityVersion.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["feature_compatibility_version"] = val + + val, err = v.FeatureCompatibilityVersionExpirationDate.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["feature_compatibility_version_expiration_date"] = val + + val, err = v.GlobalClusterSelfManagedSharding.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["global_cluster_self_managed_sharding"] = val + + val, err = v.GroupId.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["group_id"] = val + + val, err = v.Id.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["id"] = val + + val, err = v.Labels.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["labels"] = val + + val, err = v.Links.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["links"] = val + + val, err = v.MongoDbemployeeAccessGrant.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + 
vals["mongo_dbemployee_access_grant"] = val + + val, err = v.MongoDbmajorVersion.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["mongo_dbmajor_version"] = val + + val, err = v.MongoDbversion.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["mongo_dbversion"] = val + + val, err = v.Name.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["name"] = val + + val, err = v.Paused.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["paused"] = val + + val, err = v.PitEnabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["pit_enabled"] = val + + val, err = v.RedactClientLogData.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["redact_client_log_data"] = val + + val, err = v.ReplicaSetScalingStrategy.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["replica_set_scaling_strategy"] = val + + val, err = v.ReplicationSpecs.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["replication_specs"] = val + + val, err = v.RootCertType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["root_cert_type"] = val + + val, err = v.StateName.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["state_name"] = val + + val, err = v.Tags.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["tags"] = val + + val, err = v.TerminationProtectionEnabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["termination_protection_enabled"] = val + + val, err = v.VersionReleaseSystem.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["version_release_system"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ResultsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ResultsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ResultsValue) String() string { + return "ResultsValue" +} + +func (v ResultsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var biConnector basetypes.ObjectValue + + if v.BiConnector.IsNull() { + biConnector = types.ObjectNull( + BiConnectorValue{}.AttributeTypes(ctx), + ) + } + + if v.BiConnector.IsUnknown() { + biConnector = types.ObjectUnknown( + BiConnectorValue{}.AttributeTypes(ctx), + ) + } + + if !v.BiConnector.IsNull() && !v.BiConnector.IsUnknown() { + biConnector = 
types.ObjectValueMust( + BiConnectorValue{}.AttributeTypes(ctx), + v.BiConnector.Attributes(), + ) + } + + var connectionStrings basetypes.ObjectValue + + if v.ConnectionStrings.IsNull() { + connectionStrings = types.ObjectNull( + ConnectionStringsValue{}.AttributeTypes(ctx), + ) + } + + if v.ConnectionStrings.IsUnknown() { + connectionStrings = types.ObjectUnknown( + ConnectionStringsValue{}.AttributeTypes(ctx), + ) + } + + if !v.ConnectionStrings.IsNull() && !v.ConnectionStrings.IsUnknown() { + connectionStrings = types.ObjectValueMust( + ConnectionStringsValue{}.AttributeTypes(ctx), + v.ConnectionStrings.Attributes(), + ) + } + + labels := types.ListValueMust( + LabelsType{ + basetypes.ObjectType{ + AttrTypes: LabelsValue{}.AttributeTypes(ctx), + }, + }, + v.Labels.Elements(), + ) + + if v.Labels.IsNull() { + labels = types.ListNull( + LabelsType{ + basetypes.ObjectType{ + AttrTypes: LabelsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Labels.IsUnknown() { + labels = types.ListUnknown( + LabelsType{ + basetypes.ObjectType{ + AttrTypes: LabelsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + links := types.ListValueMust( + LinksType{ + basetypes.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + v.Links.Elements(), + ) + + if v.Links.IsNull() { + links = types.ListNull( + LinksType{ + basetypes.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Links.IsUnknown() { + links = types.ListUnknown( + LinksType{ + basetypes.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + var mongoDbemployeeAccessGrant basetypes.ObjectValue + + if v.MongoDbemployeeAccessGrant.IsNull() { + mongoDbemployeeAccessGrant = types.ObjectNull( + MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + ) + } + + if v.MongoDbemployeeAccessGrant.IsUnknown() { + mongoDbemployeeAccessGrant = types.ObjectUnknown( + MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + ) + } + + if !v.MongoDbemployeeAccessGrant.IsNull() && !v.MongoDbemployeeAccessGrant.IsUnknown() { + mongoDbemployeeAccessGrant = types.ObjectValueMust( + MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + v.MongoDbemployeeAccessGrant.Attributes(), + ) + } + + replicationSpecs := types.ListValueMust( + ReplicationSpecsType{ + basetypes.ObjectType{ + AttrTypes: ReplicationSpecsValue{}.AttributeTypes(ctx), + }, + }, + v.ReplicationSpecs.Elements(), + ) + + if v.ReplicationSpecs.IsNull() { + replicationSpecs = types.ListNull( + ReplicationSpecsType{ + basetypes.ObjectType{ + AttrTypes: ReplicationSpecsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.ReplicationSpecs.IsUnknown() { + replicationSpecs = types.ListUnknown( + ReplicationSpecsType{ + basetypes.ObjectType{ + AttrTypes: ReplicationSpecsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + tags := types.ListValueMust( + TagsType{ + basetypes.ObjectType{ + AttrTypes: TagsValue{}.AttributeTypes(ctx), + }, + }, + v.Tags.Elements(), + ) + + if v.Tags.IsNull() { + tags = types.ListNull( + TagsType{ + basetypes.ObjectType{ + AttrTypes: TagsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Tags.IsUnknown() { + tags = types.ListUnknown( + TagsType{ + basetypes.ObjectType{ + AttrTypes: TagsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "accept_data_risks_and_force_replica_set_reconfig": basetypes.StringType{}, + "backup_enabled": basetypes.BoolType{}, + "bi_connector": basetypes.ObjectType{ + AttrTypes: BiConnectorValue{}.AttributeTypes(ctx), + }, + 
"cluster_type": basetypes.StringType{}, + "config_server_management_mode": basetypes.StringType{}, + "config_server_type": basetypes.StringType{}, + "connection_strings": basetypes.ObjectType{ + AttrTypes: ConnectionStringsValue{}.AttributeTypes(ctx), + }, + "create_date": basetypes.StringType{}, + "disk_warming_mode": basetypes.StringType{}, + "encryption_at_rest_provider": basetypes.StringType{}, + "feature_compatibility_version": basetypes.StringType{}, + "feature_compatibility_version_expiration_date": basetypes.StringType{}, + "global_cluster_self_managed_sharding": basetypes.BoolType{}, + "group_id": basetypes.StringType{}, + "id": basetypes.StringType{}, + "labels": basetypes.ListType{ + ElemType: LabelsValue{}.Type(ctx), + }, + "links": basetypes.ListType{ + ElemType: LinksValue{}.Type(ctx), + }, + "mongo_dbemployee_access_grant": basetypes.ObjectType{ + AttrTypes: MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + }, + "mongo_dbmajor_version": basetypes.StringType{}, + "mongo_dbversion": basetypes.StringType{}, + "name": basetypes.StringType{}, + "paused": basetypes.BoolType{}, + "pit_enabled": basetypes.BoolType{}, + "redact_client_log_data": basetypes.BoolType{}, + "replica_set_scaling_strategy": basetypes.StringType{}, + "replication_specs": basetypes.ListType{ + ElemType: ReplicationSpecsValue{}.Type(ctx), + }, + "root_cert_type": basetypes.StringType{}, + "state_name": basetypes.StringType{}, + "tags": basetypes.ListType{ + ElemType: TagsValue{}.Type(ctx), + }, + "termination_protection_enabled": basetypes.BoolType{}, + "version_release_system": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "accept_data_risks_and_force_replica_set_reconfig": v.AcceptDataRisksAndForceReplicaSetReconfig, + "backup_enabled": v.BackupEnabled, + "bi_connector": biConnector, + "cluster_type": v.ClusterType, + "config_server_management_mode": v.ConfigServerManagementMode, + "config_server_type": v.ConfigServerType, + "connection_strings": connectionStrings, + "create_date": v.CreateDate, + "disk_warming_mode": v.DiskWarmingMode, + "encryption_at_rest_provider": v.EncryptionAtRestProvider, + "feature_compatibility_version": v.FeatureCompatibilityVersion, + "feature_compatibility_version_expiration_date": v.FeatureCompatibilityVersionExpirationDate, + "global_cluster_self_managed_sharding": v.GlobalClusterSelfManagedSharding, + "group_id": v.GroupId, + "id": v.Id, + "labels": labels, + "links": links, + "mongo_dbemployee_access_grant": mongoDbemployeeAccessGrant, + "mongo_dbmajor_version": v.MongoDbmajorVersion, + "mongo_dbversion": v.MongoDbversion, + "name": v.Name, + "paused": v.Paused, + "pit_enabled": v.PitEnabled, + "redact_client_log_data": v.RedactClientLogData, + "replica_set_scaling_strategy": v.ReplicaSetScalingStrategy, + "replication_specs": replicationSpecs, + "root_cert_type": v.RootCertType, + "state_name": v.StateName, + "tags": tags, + "termination_protection_enabled": v.TerminationProtectionEnabled, + "version_release_system": v.VersionReleaseSystem, + }) + + return objVal, diags +} + +func (v ResultsValue) Equal(o attr.Value) bool { + other, ok := o.(ResultsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if 
!v.AcceptDataRisksAndForceReplicaSetReconfig.Equal(other.AcceptDataRisksAndForceReplicaSetReconfig) { + return false + } + + if !v.BackupEnabled.Equal(other.BackupEnabled) { + return false + } + + if !v.BiConnector.Equal(other.BiConnector) { + return false + } + + if !v.ClusterType.Equal(other.ClusterType) { + return false + } + + if !v.ConfigServerManagementMode.Equal(other.ConfigServerManagementMode) { + return false + } + + if !v.ConfigServerType.Equal(other.ConfigServerType) { + return false + } + + if !v.ConnectionStrings.Equal(other.ConnectionStrings) { + return false + } + + if !v.CreateDate.Equal(other.CreateDate) { + return false + } + + if !v.DiskWarmingMode.Equal(other.DiskWarmingMode) { + return false + } + + if !v.EncryptionAtRestProvider.Equal(other.EncryptionAtRestProvider) { + return false + } + + if !v.FeatureCompatibilityVersion.Equal(other.FeatureCompatibilityVersion) { + return false + } + + if !v.FeatureCompatibilityVersionExpirationDate.Equal(other.FeatureCompatibilityVersionExpirationDate) { + return false + } + + if !v.GlobalClusterSelfManagedSharding.Equal(other.GlobalClusterSelfManagedSharding) { + return false + } + + if !v.GroupId.Equal(other.GroupId) { + return false + } + + if !v.Id.Equal(other.Id) { + return false + } + + if !v.Labels.Equal(other.Labels) { + return false + } + + if !v.Links.Equal(other.Links) { + return false + } + + if !v.MongoDbemployeeAccessGrant.Equal(other.MongoDbemployeeAccessGrant) { + return false + } + + if !v.MongoDbmajorVersion.Equal(other.MongoDbmajorVersion) { + return false + } + + if !v.MongoDbversion.Equal(other.MongoDbversion) { + return false + } + + if !v.Name.Equal(other.Name) { + return false + } + + if !v.Paused.Equal(other.Paused) { + return false + } + + if !v.PitEnabled.Equal(other.PitEnabled) { + return false + } + + if !v.RedactClientLogData.Equal(other.RedactClientLogData) { + return false + } + + if !v.ReplicaSetScalingStrategy.Equal(other.ReplicaSetScalingStrategy) { + return false + } + + if !v.ReplicationSpecs.Equal(other.ReplicationSpecs) { + return false + } + + if !v.RootCertType.Equal(other.RootCertType) { + return false + } + + if !v.StateName.Equal(other.StateName) { + return false + } + + if !v.Tags.Equal(other.Tags) { + return false + } + + if !v.TerminationProtectionEnabled.Equal(other.TerminationProtectionEnabled) { + return false + } + + if !v.VersionReleaseSystem.Equal(other.VersionReleaseSystem) { + return false + } + + return true +} + +func (v ResultsValue) Type(ctx context.Context) attr.Type { + return ResultsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ResultsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "accept_data_risks_and_force_replica_set_reconfig": basetypes.StringType{}, + "backup_enabled": basetypes.BoolType{}, + "bi_connector": basetypes.ObjectType{ + AttrTypes: BiConnectorValue{}.AttributeTypes(ctx), + }, + "cluster_type": basetypes.StringType{}, + "config_server_management_mode": basetypes.StringType{}, + "config_server_type": basetypes.StringType{}, + "connection_strings": basetypes.ObjectType{ + AttrTypes: ConnectionStringsValue{}.AttributeTypes(ctx), + }, + "create_date": basetypes.StringType{}, + "disk_warming_mode": basetypes.StringType{}, + "encryption_at_rest_provider": basetypes.StringType{}, + "feature_compatibility_version": basetypes.StringType{}, + "feature_compatibility_version_expiration_date": basetypes.StringType{}, + "global_cluster_self_managed_sharding": 
basetypes.BoolType{}, + "group_id": basetypes.StringType{}, + "id": basetypes.StringType{}, + "labels": basetypes.ListType{ + ElemType: LabelsValue{}.Type(ctx), + }, + "links": basetypes.ListType{ + ElemType: LinksValue{}.Type(ctx), + }, + "mongo_dbemployee_access_grant": basetypes.ObjectType{ + AttrTypes: MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + }, + "mongo_dbmajor_version": basetypes.StringType{}, + "mongo_dbversion": basetypes.StringType{}, + "name": basetypes.StringType{}, + "paused": basetypes.BoolType{}, + "pit_enabled": basetypes.BoolType{}, + "redact_client_log_data": basetypes.BoolType{}, + "replica_set_scaling_strategy": basetypes.StringType{}, + "replication_specs": basetypes.ListType{ + ElemType: ReplicationSpecsValue{}.Type(ctx), + }, + "root_cert_type": basetypes.StringType{}, + "state_name": basetypes.StringType{}, + "tags": basetypes.ListType{ + ElemType: TagsValue{}.Type(ctx), + }, + "termination_protection_enabled": basetypes.BoolType{}, + "version_release_system": basetypes.StringType{}, + } +} diff --git a/internal/service/advancedclustertpf/resource.go b/internal/service/advancedclustertpf/resource.go new file mode 100644 index 0000000000..5ec1bf5634 --- /dev/null +++ b/internal/service/advancedclustertpf/resource.go @@ -0,0 +1,50 @@ +package advancedclustertpf + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" +) + +var _ resource.ResourceWithConfigure = &rs{} +var _ resource.ResourceWithImportState = &rs{} + +func Resource() resource.Resource { + return &rs{ + RSCommon: config.RSCommon{ + ResourceName: resourceName, + }, + } +} + +type rs struct { + config.RSCommon +} + +func (r *rs) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{} // TEMPORARY: empty schema, change later to ResourceSchema(ctx) +} + +func (r *rs) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var tfModel struct{} // TEMPORARY: empty model + resp.Diagnostics.Append(req.Plan.Get(ctx, &tfModel)...) + if resp.Diagnostics.HasError() { + return + } + var tfNewModel struct{} // TEMPORARY: empty model + resp.Diagnostics.Append(resp.State.Set(ctx, tfNewModel)...) 
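The Create handler above is intentionally a placeholder: both models are empty structs until the real schema is wired in. A sketch of the same plan-to-state flow with a concrete model, where TFModelSketch and createSketch are hypothetical names used only for illustration:

```go
package advancedclustertpf

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// TFModelSketch is a hypothetical stand-in for the TEMPORARY empty structs in
// Create above; only two fields are shown to keep the sketch short.
type TFModelSketch struct {
	GroupID types.String `tfsdk:"group_id"`
	Name    types.String `tfsdk:"name"`
}

// createSketch mirrors the Create flow once a concrete model exists: read the
// planned values, call the Atlas API (omitted here), then persist the state.
func (r *rs) createSketch(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
	var plan TFModelSketch
	resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...)
	if resp.Diagnostics.HasError() {
		return
	}
	// The Atlas API call and response-to-model mapping would go here.
	resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
}
```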
+} + +func (r *rs) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +} + +func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *rs) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +} + +func (r *rs) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +} diff --git a/internal/service/advancedclustertpf/resource_schema.go b/internal/service/advancedclustertpf/resource_schema.go new file mode 100644 index 0000000000..5434528fad --- /dev/null +++ b/internal/service/advancedclustertpf/resource_schema.go @@ -0,0 +1,9375 @@ +package advancedclustertpf + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" + + "github.com/hashicorp/terraform-plugin-framework/resource/schema" +) + +func ResourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "accept_data_risks_and_force_replica_set_reconfig": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set **acceptDataRisksAndForceReplicaSetReconfig** to the current date.", + MarkdownDescription: "If reconfiguration is necessary to regain a primary due to a regional outage, submit this field alongside your topology reconfiguration to request a new regional outage resistant topology. Forced reconfigurations during an outage of the majority of electable nodes carry a risk of data loss if replicated writes (even majority committed writes) have not been replicated to the new primary node. MongoDB Atlas docs contain more information. To proceed with an operation which carries that risk, set **acceptDataRisksAndForceReplicaSetReconfig** to the current date.", + }, + "backup_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether the cluster can perform backups. If set to `true`, the cluster can perform backups. You must set this value to `true` for NVMe clusters. Backup uses [Cloud Backups](https://docs.atlas.mongodb.com/backup/cloud-backup/overview/) for dedicated clusters and [Shared Cluster Backups](https://docs.atlas.mongodb.com/backup/shared-tier/overview/) for tenant clusters. 
If set to `false`, the cluster doesn't use backups.", + MarkdownDescription: "Flag that indicates whether the cluster can perform backups. If set to `true`, the cluster can perform backups. You must set this value to `true` for NVMe clusters. Backup uses [Cloud Backups](https://docs.atlas.mongodb.com/backup/cloud-backup/overview/) for dedicated clusters and [Shared Cluster Backups](https://docs.atlas.mongodb.com/backup/shared-tier/overview/) for tenant clusters. If set to `false`, the cluster doesn't use backups.", + Default: booldefault.StaticBool(false), + }, + "bi_connector": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether MongoDB Connector for Business Intelligence is enabled on the specified cluster.", + MarkdownDescription: "Flag that indicates whether MongoDB Connector for Business Intelligence is enabled on the specified cluster.", + }, + "read_preference": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Data source node designated for the MongoDB Connector for Business Intelligence on MongoDB Cloud. The MongoDB Connector for Business Intelligence on MongoDB Cloud reads data from the primary, secondary, or analytics node based on your read preferences. Defaults to `ANALYTICS` node, or `SECONDARY` if there are no `ANALYTICS` nodes.", + MarkdownDescription: "Data source node designated for the MongoDB Connector for Business Intelligence on MongoDB Cloud. The MongoDB Connector for Business Intelligence on MongoDB Cloud reads data from the primary, secondary, or analytics node based on your read preferences. Defaults to `ANALYTICS` node, or `SECONDARY` if there are no `ANALYTICS` nodes.", + }, + }, + CustomType: BiConnectorType{ + ObjectType: types.ObjectType{ + AttrTypes: BiConnectorValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Settings needed to configure the MongoDB Connector for Business Intelligence for this cluster.", + MarkdownDescription: "Settings needed to configure the MongoDB Connector for Business Intelligence for this cluster.", + }, + "cluster_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Human-readable label that identifies this cluster.", + MarkdownDescription: "Human-readable label that identifies this cluster.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-zA-Z0-9][a-zA-Z0-9-]*)?[a-zA-Z0-9]+$"), ""), + }, + }, + "cluster_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Configuration of nodes that comprise the cluster.", + MarkdownDescription: "Configuration of nodes that comprise the cluster.", + }, + "config_server_management_mode": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Config Server Management Mode for creating or updating a sharded cluster.\n\nWhen configured as ATLAS_MANAGED, atlas may automatically switch the cluster's config server type for optimal performance and savings.\n\nWhen configured as FIXED_TO_DEDICATED, the cluster will always use a dedicated config server.", + MarkdownDescription: "Config Server Management Mode for creating or updating a sharded cluster.\n\nWhen configured as ATLAS_MANAGED, atlas may automatically switch the cluster's config server type for optimal performance and savings.\n\nWhen configured as FIXED_TO_DEDICATED, the cluster will 
always use a dedicated config server.", + Default: stringdefault.StaticString("ATLAS_MANAGED"), + }, + "config_server_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Describes a sharded cluster's config server type.", + MarkdownDescription: "Describes a sharded cluster's config server type.", + }, + "connection_strings": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "aws_private_link": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + Computed: true, + Description: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to MongoDB Cloud through the interface endpoint that the key names.", + MarkdownDescription: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to MongoDB Cloud through the interface endpoint that the key names.", + }, + "aws_private_link_srv": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + Computed: true, + Description: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to Atlas through the interface endpoint that the key names.", + MarkdownDescription: "Private endpoint-aware connection strings that use AWS-hosted clusters with Amazon Web Services (AWS) PrivateLink. Each key identifies an Amazon Web Services (AWS) interface endpoint. Each value identifies the related `mongodb://` connection string that you use to connect to Atlas through the interface endpoint that the key names.", + }, + "private": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter once someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn't, use connectionStrings.private. For Amazon Web Services (AWS) clusters, this resource returns this parameter only if you enable custom DNS.", + MarkdownDescription: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter once someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. 
If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn't, use connectionStrings.private. For Amazon Web Services (AWS) clusters, this resource returns this parameter only if you enable custom DNS.", + }, + "private_endpoint": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "connection_string": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Private endpoint-aware connection string that uses the `mongodb://` protocol to connect to MongoDB Cloud through a private endpoint.", + MarkdownDescription: "Private endpoint-aware connection string that uses the `mongodb://` protocol to connect to MongoDB Cloud through a private endpoint.", + }, + "endpoints": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "endpoint_id": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Unique string that the cloud provider uses to identify the private endpoint.", + MarkdownDescription: "Unique string that the cloud provider uses to identify the private endpoint.", + }, + "provider_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Cloud provider in which MongoDB Cloud deploys the private endpoint.", + MarkdownDescription: "Cloud provider in which MongoDB Cloud deploys the private endpoint.", + }, + "region": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Region where the private endpoint is deployed.", + MarkdownDescription: "Region where the private endpoint is deployed.", + }, + }, + CustomType: EndpointsType{ + ObjectType: types.ObjectType{ + AttrTypes: EndpointsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "List that contains the private endpoints through which you connect to MongoDB Cloud when you use **connectionStrings.privateEndpoint[n].connectionString** or **connectionStrings.privateEndpoint[n].srvConnectionString**.", + MarkdownDescription: "List that contains the private endpoints through which you connect to MongoDB Cloud when you use **connectionStrings.privateEndpoint[n].connectionString** or **connectionStrings.privateEndpoint[n].srvConnectionString**.", + }, + "srv_connection_string": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Private endpoint-aware connection string that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. The `mongodb+srv` protocol tells the driver to look up the seed list of hosts in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application supports it. If it doesn't, use connectionStrings.privateEndpoint[n].connectionString.", + MarkdownDescription: "Private endpoint-aware connection string that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. The `mongodb+srv` protocol tells the driver to look up the seed list of hosts in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. 
If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application supports it. If it doesn't, use connectionStrings.privateEndpoint[n].connectionString.", + }, + "srv_shard_optimized_connection_string": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Private endpoint-aware connection string optimized for sharded clusters that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application and Atlas cluster supports it. If it doesn't, use and consult the documentation for connectionStrings.privateEndpoint[n].srvConnectionString.", + MarkdownDescription: "Private endpoint-aware connection string optimized for sharded clusters that uses the `mongodb+srv://` protocol to connect to MongoDB Cloud through a private endpoint. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your application and Atlas cluster supports it. If it doesn't, use and consult the documentation for connectionStrings.privateEndpoint[n].srvConnectionString.", + }, + "type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "MongoDB process type to which your application connects. Use `MONGOD` for replica sets and `MONGOS` for sharded clusters.", + MarkdownDescription: "MongoDB process type to which your application connects. Use `MONGOD` for replica sets and `MONGOS` for sharded clusters.", + }, + }, + CustomType: PrivateEndpointType{ + ObjectType: types.ObjectType{ + AttrTypes: PrivateEndpointValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "List of private endpoint-aware connection strings that you can use to connect to this cluster through a private endpoint. This parameter returns only if you deployed a private endpoint to all regions to which you deployed this clusters' nodes.", + MarkdownDescription: "List of private endpoint-aware connection strings that you can use to connect to this cluster through a private endpoint. This parameter returns only if you deployed a private endpoint to all regions to which you deployed this clusters' nodes.", + }, + "private_srv": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter when someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your driver supports it. If it doesn't, use `connectionStrings.private`. 
For Amazon Web Services (AWS) clusters, this parameter returns only if you [enable custom DNS](https://docs.atlas.mongodb.com/reference/api/aws-custom-dns-update/).", + MarkdownDescription: "Network peering connection strings for each interface Virtual Private Cloud (VPC) endpoint that you configured to connect to this cluster. This connection string uses the `mongodb+srv://` protocol. The resource returns this parameter when someone creates a network peering connection to this cluster. This protocol tells the application to look up the host seed list in the Domain Name System (DNS). This list synchronizes with the nodes in a cluster. If the connection string uses this Uniform Resource Identifier (URI) format, you don't need to append the seed list or change the Uniform Resource Identifier (URI) if the nodes change. Use this Uniform Resource Identifier (URI) format if your driver supports it. If it doesn't, use `connectionStrings.private`. For Amazon Web Services (AWS) clusters, this parameter returns only if you [enable custom DNS](https://docs.atlas.mongodb.com/reference/api/aws-custom-dns-update/).", + }, + "standard": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb://` protocol.", + MarkdownDescription: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb://` protocol.", + }, + "standard_srv": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb+srv://` protocol.", + MarkdownDescription: "Public connection string that you can use to connect to this cluster. This connection string uses the `mongodb+srv://` protocol.", + }, + }, + CustomType: ConnectionStringsType{ + ObjectType: types.ObjectType{ + AttrTypes: ConnectionStringsValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Collection of Uniform Resource Locators that point to the MongoDB database.", + MarkdownDescription: "Collection of Uniform Resource Locators that point to the MongoDB database.", + }, + "create_date": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Date and time when MongoDB Cloud created this cluster. This parameter expresses its value in ISO 8601 format in UTC.", + MarkdownDescription: "Date and time when MongoDB Cloud created this cluster. This parameter expresses its value in ISO 8601 format in UTC.", + }, + "disk_warming_mode": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Disk warming mode selection.", + MarkdownDescription: "Disk warming mode selection.", + Default: stringdefault.StaticString("FULLY_WARMED"), + }, + "encryption_at_rest_provider": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Cloud service provider that manages your customer keys to provide an additional layer of encryption at rest for the cluster. To enable customer key management for encryption at rest, the cluster **replicationSpecs[n].regionConfigs[m].{type}Specs.instanceSize** setting must be `M10` or higher and `\"backupEnabled\" : false` or omitted entirely.", + MarkdownDescription: "Cloud service provider that manages your customer keys to provide an additional layer of encryption at rest for the cluster. 
To enable customer key management for encryption at rest, the cluster **replicationSpecs[n].regionConfigs[m].{type}Specs.instanceSize** setting must be `M10` or higher and `\"backupEnabled\" : false` or omitted entirely.", + }, + "feature_compatibility_version": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Feature compatibility version of the cluster.", + MarkdownDescription: "Feature compatibility version of the cluster.", + }, + "feature_compatibility_version_expiration_date": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Feature compatibility version expiration date.", + MarkdownDescription: "Feature compatibility version expiration date.", + }, + "global_cluster_self_managed_sharding": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Set this field to configure the Sharding Management Mode when creating a new Global Cluster.\n\nWhen set to false, the management mode is set to Atlas-Managed Sharding. This mode fully manages the sharding of your Global Cluster and is built to provide a seamless deployment experience.\n\nWhen set to true, the management mode is set to Self-Managed Sharding. This mode leaves the management of shards in your hands and is built to provide an advanced and flexible deployment experience.\n\nThis setting cannot be changed once the cluster is deployed.", + MarkdownDescription: "Set this field to configure the Sharding Management Mode when creating a new Global Cluster.\n\nWhen set to false, the management mode is set to Atlas-Managed Sharding. This mode fully manages the sharding of your Global Cluster and is built to provide a seamless deployment experience.\n\nWhen set to true, the management mode is set to Self-Managed Sharding. This mode leaves the management of shards in your hands and is built to provide an advanced and flexible deployment experience.\n\nThis setting cannot be changed once the cluster is deployed.", + }, + "group_id": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Unique 24-hexadecimal character string that identifies the project.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies the project.", + Validators: []validator.String{ + stringvalidator.LengthBetween(24, 24), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-f0-9]{24})$"), ""), + }, + }, + "id": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the cluster.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the cluster.", + Validators: []validator.String{ + stringvalidator.LengthBetween(24, 24), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-f0-9]{24})$"), ""), + }, + }, + "labels": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Key applied to tag and categorize this component.", + MarkdownDescription: "Key applied to tag and categorize this component.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + "value": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Value set to the Key applied to tag and categorize this component.", + MarkdownDescription: "Value set to the Key applied to tag and categorize this component.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, 
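Most attributes in ResourceSchema are declared Optional and Computed, and a handful (config_server_management_mode and disk_warming_mode above, for instance) also carry a static default so the framework can fill the value when it is omitted from the configuration. A minimal sketch of that pattern in isolation; the helper name is illustrative:

```go
package advancedclustertpf

import (
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
)

// defaultedStringAttributeSketch isolates the Optional+Computed+Default shape
// used throughout ResourceSchema; the default shown matches the one declared
// for config_server_management_mode.
func defaultedStringAttributeSketch() schema.StringAttribute {
	return schema.StringAttribute{
		Optional: true,
		Computed: true,
		Default:  stringdefault.StaticString("ATLAS_MANAGED"),
	}
}
```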
+ CustomType: LabelsType{ + ObjectType: types.ObjectType{ + AttrTypes: LabelsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "Collection of key-value pairs between 1 to 255 characters in length that tag and categorize the cluster. The MongoDB Cloud console doesn't display your labels.\n\nCluster labels are deprecated and will be removed in a future release. We strongly recommend that you use [resource tags](https://dochub.mongodb.org/core/add-cluster-tag-atlas) instead.", + MarkdownDescription: "Collection of key-value pairs between 1 to 255 characters in length that tag and categorize the cluster. The MongoDB Cloud console doesn't display your labels.\n\nCluster labels are deprecated and will be removed in a future release. We strongly recommend that you use [resource tags](https://dochub.mongodb.org/core/add-cluster-tag-atlas) instead.", + DeprecationMessage: "This attribute is deprecated.", + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + }, + "mongo_dbemployee_access_grant": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "expiration_time": schema.StringAttribute{ + Required: true, + Description: "Expiration date for the employee access grant.", + MarkdownDescription: "Expiration date for the employee access grant.", + }, + "grant_type": schema.StringAttribute{ + Required: true, + Description: "Level of access to grant to MongoDB Employees.", + MarkdownDescription: "Level of access to grant to MongoDB Employees.", + }, + "links": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "href": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. 
This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that points another API resource to which this response has some relationship. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + "rel": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + MarkdownDescription: "Uniform Resource Locator (URL) that defines the semantic relationship between this resource and another API resource. This URL often begins with `https://cloud.mongodb.com/api/atlas`.", + }, + }, + CustomType: LinksType{ + ObjectType: types.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + MarkdownDescription: "List of one or more Uniform Resource Locators (URLs) that point to API sub-resources, related API resources, or both. RFC 5988 outlines these relationships.", + }, + }, + CustomType: MongoDbemployeeAccessGrantType{ + ObjectType: types.ObjectType{ + AttrTypes: MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "MongoDB employee granted access level and expiration for a cluster.", + MarkdownDescription: "MongoDB employee granted access level and expiration for a cluster.", + }, + "mongo_dbmajor_version": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "MongoDB major version of the cluster.\n\nOn creation: Choose from the available versions of MongoDB, or leave unspecified for the current recommended default in the MongoDB Cloud platform. The recommended version is a recent Long Term Support version. The default is not guaranteed to be the most recently released version throughout the entire release cycle. For versions available in a specific project, see the linked documentation or use the API endpoint for [project LTS versions endpoint](#tag/Projects/operation/getProjectLTSVersions).\n\n On update: Increase version only by 1 major version at a time. If the cluster is pinned to a MongoDB feature compatibility version exactly one major version below the current MongoDB version, the MongoDB version can be downgraded to the previous major version.", + MarkdownDescription: "MongoDB major version of the cluster.\n\nOn creation: Choose from the available versions of MongoDB, or leave unspecified for the current recommended default in the MongoDB Cloud platform. The recommended version is a recent Long Term Support version. The default is not guaranteed to be the most recently released version throughout the entire release cycle. For versions available in a specific project, see the linked documentation or use the API endpoint for [project LTS versions endpoint](#tag/Projects/operation/getProjectLTSVersions).\n\n On update: Increase version only by 1 major version at a time. 
If the cluster is pinned to a MongoDB feature compatibility version exactly one major version below the current MongoDB version, the MongoDB version can be downgraded to the previous major version.", + }, + "mongo_dbversion": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Version of MongoDB that the cluster runs.", + MarkdownDescription: "Version of MongoDB that the cluster runs.", + Validators: []validator.String{ + stringvalidator.RegexMatches(regexp.MustCompile("([\\d]+\\.[\\d]+\\.[\\d]+)"), ""), + }, + }, + "name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Human-readable label that identifies the cluster.", + MarkdownDescription: "Human-readable label that identifies the cluster.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 64), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-zA-Z0-9][a-zA-Z0-9-]*)?[a-zA-Z0-9]+$"), ""), + }, + }, + "paused": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether the cluster is paused.", + MarkdownDescription: "Flag that indicates whether the cluster is paused.", + }, + "pit_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether the cluster uses continuous cloud backups.", + MarkdownDescription: "Flag that indicates whether the cluster uses continuous cloud backups.", + }, + "redact_client_log_data": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Enable or disable log redaction.\n\nThis setting configures the ``mongod`` or ``mongos`` to redact any document field contents from a message accompanying a given log event before logging. This prevents the program from writing potentially sensitive data stored on the database to the diagnostic log. Metadata such as error or operation codes, line numbers, and source file names are still visible in the logs.\n\nUse ``redactClientLogData`` in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements.\n\n*Note*: changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated.", + MarkdownDescription: "Enable or disable log redaction.\n\nThis setting configures the ``mongod`` or ``mongos`` to redact any document field contents from a message accompanying a given log event before logging. This prevents the program from writing potentially sensitive data stored on the database to the diagnostic log. Metadata such as error or operation codes, line numbers, and source file names are still visible in the logs.\n\nUse ``redactClientLogData`` in conjunction with Encryption at Rest and TLS/SSL (Transport Encryption) to assist compliance with regulatory requirements.\n\n*Note*: changing this setting on a cluster will trigger a rolling restart as soon as the cluster is updated.", + }, + "replica_set_scaling_strategy": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Set this field to configure the replica set scaling mode for your cluster.\n\nBy default, Atlas scales under WORKLOAD_TYPE. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes.\n\nWhen configured as SEQUENTIAL, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads.\n\nWhen configured as NODE_TYPE, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. 
This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads.", + MarkdownDescription: "Set this field to configure the replica set scaling mode for your cluster.\n\nBy default, Atlas scales under WORKLOAD_TYPE. This mode allows Atlas to scale your analytics nodes in parallel to your operational nodes.\n\nWhen configured as SEQUENTIAL, Atlas scales all nodes sequentially. This mode is intended for steady-state workloads and applications performing latency-sensitive secondary reads.\n\nWhen configured as NODE_TYPE, Atlas scales your electable nodes in parallel with your read-only and analytics nodes. This mode is intended for large, dynamic workloads requiring frequent and timely cluster tier scaling. This is the fastest scaling strategy, but it might impact latency of workloads when performing extensive secondary reads.", + Default: stringdefault.StaticString("WORKLOAD_TYPE"), + }, + "replication_specs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. If you include existing shard replication configurations in the request, you must specify this parameter. If you add a new shard to an existing Cluster, you may specify this parameter. The request deletes any existing shards in the Cluster that you exclude from the request. This corresponds to Shard ID displayed in the UI.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the replication object for a shard in a Cluster. If you include existing shard replication configurations in the request, you must specify this parameter. If you add a new shard to an existing Cluster, you may specify this parameter. The request deletes any existing shards in the Cluster that you exclude from the request. This corresponds to Shard ID displayed in the UI.", + Validators: []validator.String{ + stringvalidator.LengthBetween(24, 24), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-f0-9]{24})$"), ""), + }, + }, + "region_configs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "analytics_auto_scaling": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + MarkdownDescription: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + }, + "max_instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Maximum instance size to which your cluster can automatically scale. 
MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Maximum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "min_instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "scale_down_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + MarkdownDescription: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + }, + }, + CustomType: ComputeType{ + ObjectType: types.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Options that determine how this cluster handles CPU scaling.", + MarkdownDescription: "Options that determine how this cluster handles CPU scaling.", + }, + "disk_gb": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + MarkdownDescription: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + }, + }, + CustomType: DiskGbType{ + ObjectType: types.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Setting that enables disk auto-scaling.", + MarkdownDescription: "Setting that enables disk auto-scaling.", + }, + }, + CustomType: AnalyticsAutoScalingType{ + ObjectType: types.ObjectType{ + AttrTypes: AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Options that determine how this cluster handles resource scaling.", + MarkdownDescription: "Options that determine how this cluster handles resource scaling.", + }, + "analytics_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. 
Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Optional: true, + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. 
You must set this value to (`PROVISIONED`) for NVMe clusters.", + Default: stringdefault.StaticString("STANDARD"), + }, + "instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + MarkdownDescription: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + }, + "node_count": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: AnalyticsSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: AnalyticsSpecsValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + MarkdownDescription: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + }, + "auto_scaling": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "compute": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + MarkdownDescription: "Flag that indicates whether someone enabled instance size auto-scaling.\n\n- Set to `true` to enable instance size auto-scaling. If enabled, you must specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.maxInstanceSize**.\n- Set to `false` to disable instance size automatic scaling.", + }, + "max_instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Maximum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Maximum instance size to which your cluster can automatically scale. 
MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "min_instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + MarkdownDescription: "Minimum instance size to which your cluster can automatically scale. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.scaleDownEnabled\" : true`.", + }, + "scale_down_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + MarkdownDescription: "Flag that indicates whether the instance size may scale down. MongoDB Cloud requires this parameter if `\"replicationSpecs[n].regionConfigs[m].autoScaling.compute.enabled\" : true`. If you enable this option, specify a value for **replicationSpecs[n].regionConfigs[m].autoScaling.compute.minInstanceSize**.", + }, + }, + CustomType: ComputeType{ + ObjectType: types.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Options that determine how this cluster handles CPU scaling.", + MarkdownDescription: "Options that determine how this cluster handles CPU scaling.", + }, + "disk_gb": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + MarkdownDescription: "Flag that indicates whether this cluster enables disk auto-scaling. The maximum memory allowed for the selected cluster tier and the oplog size can limit storage auto-scaling.", + }, + }, + CustomType: DiskGbType{ + ObjectType: types.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Setting that enables disk auto-scaling.", + MarkdownDescription: "Setting that enables disk auto-scaling.", + }, + }, + CustomType: AutoScalingType{ + ObjectType: types.ObjectType{ + AttrTypes: AutoScalingValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Options that determine how this cluster handles resource scaling.", + MarkdownDescription: "Options that determine how this cluster handles resource scaling.", + }, + "backing_provider_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Cloud service provider on which MongoDB Cloud provisioned the multi-tenant cluster. The resource returns this parameter when **providerName** is `TENANT` and **electableSpecs.instanceSize** is `M0`, `M2` or `M5`.", + MarkdownDescription: "Cloud service provider on which MongoDB Cloud provisioned the multi-tenant cluster. 
The resource returns this parameter when **providerName** is `TENANT` and **electableSpecs.instanceSize** is `M0`, `M2` or `M5`.", + }, + "electable_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Optional: true, + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. 
You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + Default: stringdefault.StaticString("STANDARD"), + }, + "instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Hardware specification for the instances in this M0/M2/M5 tier cluster.", + MarkdownDescription: "Hardware specification for the instances in this M0/M2/M5 tier cluster.", + }, + "node_count": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: ElectableSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ElectableSpecsValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Hardware specifications for all electable nodes deployed in the region. Electable nodes can become the primary and can enable local reads. If you don't specify this option, MongoDB Cloud deploys no electable nodes to the region.", + MarkdownDescription: "Hardware specifications for all electable nodes deployed in the region. Electable nodes can become the primary and can enable local reads. If you don't specify this option, MongoDB Cloud deploys no electable nodes to the region.", + }, + "priority": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Precedence is given to this region when a primary election occurs. If your **regionConfigs** has only **readOnlySpecs**, **analyticsSpecs**, or both, set this value to `0`. If you have multiple **regionConfigs** objects (your cluster is multi-region or multi-cloud), they must have priorities in descending order. The highest priority is `7`.\n\n**Example:** If you have three regions, their priorities would be `7`, `6`, and `5` respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be `4` and `3` respectively.", + MarkdownDescription: "Precedence is given to this region when a primary election occurs. If your **regionConfigs** has only **readOnlySpecs**, **analyticsSpecs**, or both, set this value to `0`. If you have multiple **regionConfigs** objects (your cluster is multi-region or multi-cloud), they must have priorities in descending order. The highest priority is `7`.\n\n**Example:** If you have three regions, their priorities would be `7`, `6`, and `5` respectively. If you added two more regions for supporting electable nodes, the priorities of those regions would be `4` and `3` respectively.", + Validators: []validator.Int64{ + int64validator.Between(0, 7), + }, + }, + "provider_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Cloud service provider on which MongoDB Cloud provisions the hosts. Set dedicated clusters to `AWS`, `GCP`, `AZURE` or `TENANT`.", + MarkdownDescription: "Cloud service provider on which MongoDB Cloud provisions the hosts. 
Set dedicated clusters to `AWS`, `GCP`, `AZURE` or `TENANT`.", + }, + "read_only_specs": schema.SingleNestedAttribute{ + Attributes: map[string]schema.Attribute{ + "disk_iops": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + MarkdownDescription: "Target throughput desired for storage attached to your Azure-provisioned cluster. Change this parameter if you:\n\n- set `\"replicationSpecs[n].regionConfigs[m].providerName\" : \"Azure\"`.\n- set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : \"M40\"` or greater not including `Mxx_NVME` tiers.\n\nThe maximum input/output operations per second (IOPS) depend on the selected **.instanceSize** and **.diskSizeGB**.\nThis parameter defaults to the cluster tier's standard IOPS value.\nChanging this value impacts cluster cost.", + }, + "disk_size_gb": schema.Float64Attribute{ + Optional: true, + Computed: true, + Description: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + MarkdownDescription: "Storage capacity of instance data volumes expressed in gigabytes. Increase this number to add capacity.\n\n This value must be equal for all shards and node types.\n\n This value is not configurable on M0/M2/M5 clusters.\n\n MongoDB Cloud requires this parameter if you set **replicationSpecs**.\n\n If you specify a disk size below the minimum (10 GB), this parameter defaults to the minimum disk size value. \n\n Storage charge calculations depend on whether you choose the default value or a custom value.\n\n The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require more storage space, consider upgrading your cluster to a higher tier.", + }, + "ebs_volume_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. 
You must set this value to (`PROVISIONED`) for NVMe clusters.", + MarkdownDescription: "Type of storage you want to attach to your AWS-provisioned cluster.\n\n- `STANDARD` volume types can't exceed the default input/output operations per second (IOPS) rate for the selected volume size. \n\n- `PROVISIONED` volume types must fall within the allowable IOPS range for the selected volume size. You must set this value to (`PROVISIONED`) for NVMe clusters.", + Default: stringdefault.StaticString("STANDARD"), + }, + "instance_size": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + MarkdownDescription: "Hardware specification for the instance sizes in this region in this shard. Each instance size has a default storage and memory capacity. Electable nodes and read-only nodes (known as \"base nodes\") within a single shard must use the same instance size. Analytics nodes can scale independently from base nodes within a shard. Both base nodes and analytics nodes can scale independently from their equivalents in other shards.", + }, + "node_count": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + MarkdownDescription: "Number of nodes of the given type for MongoDB Cloud to deploy to the region.", + }, + }, + CustomType: ReadOnlySpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ReadOnlySpecsValue{}.AttributeTypes(ctx), + }, + }, + Optional: true, + Computed: true, + Description: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + MarkdownDescription: "Hardware specifications for read-only nodes in the region. Read-only nodes can never become the primary member, but can enable local reads. If you don't specify this parameter, no read-only nodes are deployed to the region.", + }, + "region_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Physical location of your MongoDB cluster nodes. The region you choose can affect network latency for clients accessing your databases. The region name is only returned in the response for single-region clusters. When MongoDB Cloud deploys a dedicated cluster, it checks if a VPC or VPC connection exists for that provider and region. If not, MongoDB Cloud creates them as part of the deployment. It assigns the VPC a Classless Inter-Domain Routing (CIDR) block. To limit a new VPC peering connection to one Classless Inter-Domain Routing (CIDR) block and region, create the connection first. Deploy the cluster after the connection starts. GCP Clusters and Multi-region clusters require one VPC peering connection for each region. MongoDB nodes can use only the peering connection that resides in the same region as the nodes to communicate with the peered VPC.", + MarkdownDescription: "Physical location of your MongoDB cluster nodes. 
The region you choose can affect network latency for clients accessing your databases. The region name is only returned in the response for single-region clusters. When MongoDB Cloud deploys a dedicated cluster, it checks if a VPC or VPC connection exists for that provider and region. If not, MongoDB Cloud creates them as part of the deployment. It assigns the VPC a Classless Inter-Domain Routing (CIDR) block. To limit a new VPC peering connection to one Classless Inter-Domain Routing (CIDR) block and region, create the connection first. Deploy the cluster after the connection starts. GCP Clusters and Multi-region clusters require one VPC peering connection for each region. MongoDB nodes can use only the peering connection that resides in the same region as the nodes to communicate with the peered VPC.", + }, + }, + CustomType: RegionConfigsType{ + ObjectType: types.ObjectType{ + AttrTypes: RegionConfigsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "Hardware specifications for nodes set for a given region. Each **regionConfigs** object describes the region's priority in elections and the number and type of MongoDB nodes that MongoDB Cloud deploys to the region. Each **regionConfigs** object must have either an **analyticsSpecs** object, **electableSpecs** object, or **readOnlySpecs** object. Tenant clusters only require **electableSpecs. Dedicated** clusters can specify any of these specifications, but must have at least one **electableSpecs** object within a **replicationSpec**.\n\n**Example:**\n\nIf you set `\"replicationSpecs[n].regionConfigs[m].analyticsSpecs.instanceSize\" : \"M30\"`, set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : `\"M30\"` if you have electable nodes and `\"replicationSpecs[n].regionConfigs[m].readOnlySpecs.instanceSize\" : `\"M30\"` if you have read-only nodes.", + MarkdownDescription: "Hardware specifications for nodes set for a given region. Each **regionConfigs** object describes the region's priority in elections and the number and type of MongoDB nodes that MongoDB Cloud deploys to the region. Each **regionConfigs** object must have either an **analyticsSpecs** object, **electableSpecs** object, or **readOnlySpecs** object. Tenant clusters only require **electableSpecs. Dedicated** clusters can specify any of these specifications, but must have at least one **electableSpecs** object within a **replicationSpec**.\n\n**Example:**\n\nIf you set `\"replicationSpecs[n].regionConfigs[m].analyticsSpecs.instanceSize\" : \"M30\"`, set `\"replicationSpecs[n].regionConfigs[m].electableSpecs.instanceSize\" : `\"M30\"` if you have electable nodes and `\"replicationSpecs[n].regionConfigs[m].readOnlySpecs.instanceSize\" : `\"M30\"` if you have read-only nodes.", + }, + "zone_id": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. This value can be used to configure Global Cluster backup policies.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the zone in a Global Cluster. 
This value can be used to configure Global Cluster backup policies.", + Validators: []validator.String{ + stringvalidator.LengthBetween(24, 24), + stringvalidator.RegexMatches(regexp.MustCompile("^([a-f0-9]{24})$"), ""), + }, + }, + "zone_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Human-readable label that describes the zone this shard belongs to in a Global Cluster. Provide this value only if \"clusterType\" : \"GEOSHARDED\" but not \"selfManagedSharding\" : true.", + MarkdownDescription: "Human-readable label that describes the zone this shard belongs to in a Global Cluster. Provide this value only if \"clusterType\" : \"GEOSHARDED\" but not \"selfManagedSharding\" : true.", + }, + }, + CustomType: ReplicationSpecsType{ + ObjectType: types.ObjectType{ + AttrTypes: ReplicationSpecsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "List of settings that configure your cluster regions. This array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations.", + MarkdownDescription: "List of settings that configure your cluster regions. This array has one object per shard representing node configurations in each shard. For replica sets there is only one object representing node configurations.", + }, + "root_cert_type": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Root Certificate Authority that MongoDB Cloud cluster uses. MongoDB Cloud supports Internet Security Research Group.", + MarkdownDescription: "Root Certificate Authority that MongoDB Cloud cluster uses. MongoDB Cloud supports Internet Security Research Group.", + Default: stringdefault.StaticString("ISRGROOTX1"), + }, + "state_name": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Human-readable label that indicates the current operating condition of this cluster.", + MarkdownDescription: "Human-readable label that indicates the current operating condition of this cluster.", + }, + "tags": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Required: true, + Description: "Constant that defines the set of the tag. For example, `environment` in the `environment : production` tag.", + MarkdownDescription: "Constant that defines the set of the tag. For example, `environment` in the `environment : production` tag.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + "value": schema.StringAttribute{ + Required: true, + Description: "Variable that belongs to the set of the tag. For example, `production` in the `environment : production` tag.", + MarkdownDescription: "Variable that belongs to the set of the tag. 
For example, `production` in the `environment : production` tag.", + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + }, + CustomType: TagsType{ + ObjectType: types.ObjectType{ + AttrTypes: TagsValue{}.AttributeTypes(ctx), + }, + }, + }, + Optional: true, + Computed: true, + Description: "List that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster.", + MarkdownDescription: "List that contains key-value pairs between 1 to 255 characters in length for tagging and categorizing the cluster.", + }, + "termination_protection_enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Flag that indicates whether termination protection is enabled on the cluster. If set to `true`, MongoDB Cloud won't delete the cluster. If set to `false`, MongoDB Cloud will delete the cluster.", + MarkdownDescription: "Flag that indicates whether termination protection is enabled on the cluster. If set to `true`, MongoDB Cloud won't delete the cluster. If set to `false`, MongoDB Cloud will delete the cluster.", + Default: booldefault.StaticBool(false), + }, + "version_release_system": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "Method by which the cluster maintains the MongoDB versions. If value is `CONTINUOUS`, you must not specify **mongoDBMajorVersion**.", + MarkdownDescription: "Method by which the cluster maintains the MongoDB versions. If value is `CONTINUOUS`, you must not specify **mongoDBMajorVersion**.", + Default: stringdefault.StaticString("LTS"), + }, + }, + } +} + +type TFModel struct { + Labels types.List `tfsdk:"labels"` + Tags types.List `tfsdk:"tags"` + ReplicationSpecs types.List `tfsdk:"replication_specs"` + Links types.List `tfsdk:"links"` + CreateDate types.String `tfsdk:"create_date"` + ClusterName types.String `tfsdk:"cluster_name"` + ConfigServerType types.String `tfsdk:"config_server_type"` + VersionReleaseSystem types.String `tfsdk:"version_release_system"` + AcceptDataRisksAndForceReplicaSetReconfig types.String `tfsdk:"accept_data_risks_and_force_replica_set_reconfig"` + DiskWarmingMode types.String `tfsdk:"disk_warming_mode"` + EncryptionAtRestProvider types.String `tfsdk:"encryption_at_rest_provider"` + FeatureCompatibilityVersion types.String `tfsdk:"feature_compatibility_version"` + FeatureCompatibilityVersionExpirationDate types.String `tfsdk:"feature_compatibility_version_expiration_date"` + StateName types.String `tfsdk:"state_name"` + GroupId types.String `tfsdk:"group_id"` + Id types.String `tfsdk:"id"` + ClusterType types.String `tfsdk:"cluster_type"` + ConfigServerManagementMode types.String `tfsdk:"config_server_management_mode"` + RootCertType types.String `tfsdk:"root_cert_type"` + MongoDbmajorVersion types.String `tfsdk:"mongo_dbmajor_version"` + MongoDbversion types.String `tfsdk:"mongo_dbversion"` + Name types.String `tfsdk:"name"` + ReplicaSetScalingStrategy types.String `tfsdk:"replica_set_scaling_strategy"` + ConnectionStrings ConnectionStringsValue `tfsdk:"connection_strings"` + MongoDbemployeeAccessGrant MongoDbemployeeAccessGrantValue `tfsdk:"mongo_dbemployee_access_grant"` + BiConnector BiConnectorValue `tfsdk:"bi_connector"` + PitEnabled types.Bool `tfsdk:"pit_enabled"` + RedactClientLogData types.Bool `tfsdk:"redact_client_log_data"` + Paused types.Bool `tfsdk:"paused"` + GlobalClusterSelfManagedSharding types.Bool `tfsdk:"global_cluster_self_managed_sharding"` + BackupEnabled types.Bool `tfsdk:"backup_enabled"` + 
TerminationProtectionEnabled types.Bool `tfsdk:"termination_protection_enabled"` +} + +var _ basetypes.ObjectTypable = BiConnectorType{} + +type BiConnectorType struct { + basetypes.ObjectType +} + +func (t BiConnectorType) Equal(o attr.Type) bool { + other, ok := o.(BiConnectorType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t BiConnectorType) String() string { + return "BiConnectorType" +} + +func (t BiConnectorType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return nil, diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + readPreferenceAttribute, ok := attributes["read_preference"] + + if !ok { + diags.AddError( + "Attribute Missing", + `read_preference is missing from object`) + + return nil, diags + } + + readPreferenceVal, ok := readPreferenceAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`read_preference expected to be basetypes.StringValue, was: %T`, readPreferenceAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return BiConnectorValue{ + Enabled: enabledVal, + ReadPreference: readPreferenceVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewBiConnectorValueNull() BiConnectorValue { + return BiConnectorValue{ + state: attr.ValueStateNull, + } +} + +func NewBiConnectorValueUnknown() BiConnectorValue { + return BiConnectorValue{ + state: attr.ValueStateUnknown, + } +} + +func NewBiConnectorValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (BiConnectorValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing BiConnectorValue Attribute Value", + "While creating a BiConnectorValue value, a missing attribute value was detected. "+ + "A BiConnectorValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("BiConnectorValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid BiConnectorValue Attribute Type", + "While creating a BiConnectorValue value, an invalid attribute value was detected. "+ + "A BiConnectorValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("BiConnectorValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("BiConnectorValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra BiConnectorValue Attribute Value", + "While creating a BiConnectorValue value, an extra attribute value was detected. 
"+ + "A BiConnectorValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra BiConnectorValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewBiConnectorValueUnknown(), diags + } + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return NewBiConnectorValueUnknown(), diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + readPreferenceAttribute, ok := attributes["read_preference"] + + if !ok { + diags.AddError( + "Attribute Missing", + `read_preference is missing from object`) + + return NewBiConnectorValueUnknown(), diags + } + + readPreferenceVal, ok := readPreferenceAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`read_preference expected to be basetypes.StringValue, was: %T`, readPreferenceAttribute)) + } + + if diags.HasError() { + return NewBiConnectorValueUnknown(), diags + } + + return BiConnectorValue{ + Enabled: enabledVal, + ReadPreference: readPreferenceVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewBiConnectorValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) BiConnectorValue { + object, diags := NewBiConnectorValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewBiConnectorValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t BiConnectorType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewBiConnectorValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewBiConnectorValueUnknown(), nil + } + + if in.IsNull() { + return NewBiConnectorValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewBiConnectorValueMust(BiConnectorValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t BiConnectorType) ValueType(ctx context.Context) attr.Value { + return BiConnectorValue{} +} + +var _ basetypes.ObjectValuable = BiConnectorValue{} + +type BiConnectorValue struct { + ReadPreference basetypes.StringValue `tfsdk:"read_preference"` + Enabled basetypes.BoolValue `tfsdk:"enabled"` + state attr.ValueState +} + +func (v BiConnectorValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["enabled"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["read_preference"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := 
tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Enabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["enabled"] = val + + val, err = v.ReadPreference.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["read_preference"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v BiConnectorValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v BiConnectorValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v BiConnectorValue) String() string { + return "BiConnectorValue" +} + +func (v BiConnectorValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + "read_preference": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "enabled": v.Enabled, + "read_preference": v.ReadPreference, + }) + + return objVal, diags +} + +func (v BiConnectorValue) Equal(o attr.Value) bool { + other, ok := o.(BiConnectorValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Enabled.Equal(other.Enabled) { + return false + } + + if !v.ReadPreference.Equal(other.ReadPreference) { + return false + } + + return true +} + +func (v BiConnectorValue) Type(ctx context.Context) attr.Type { + return BiConnectorType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v BiConnectorValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + "read_preference": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = ConnectionStringsType{} + +type ConnectionStringsType struct { + basetypes.ObjectType +} + +func (t ConnectionStringsType) Equal(o attr.Type) bool { + other, ok := o.(ConnectionStringsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ConnectionStringsType) String() string { + return "ConnectionStringsType" +} + +func (t ConnectionStringsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + awsPrivateLinkAttribute, ok := attributes["aws_private_link"] + + if !ok { + diags.AddError( + "Attribute Missing", + `aws_private_link is missing from object`) + + return nil, diags + } + + awsPrivateLinkVal, ok := awsPrivateLinkAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`aws_private_link expected to be basetypes.MapValue, was: %T`, 
awsPrivateLinkAttribute)) + } + + awsPrivateLinkSrvAttribute, ok := attributes["aws_private_link_srv"] + + if !ok { + diags.AddError( + "Attribute Missing", + `aws_private_link_srv is missing from object`) + + return nil, diags + } + + awsPrivateLinkSrvVal, ok := awsPrivateLinkSrvAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`aws_private_link_srv expected to be basetypes.MapValue, was: %T`, awsPrivateLinkSrvAttribute)) + } + + privateAttribute, ok := attributes["private"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private is missing from object`) + + return nil, diags + } + + privateVal, ok := privateAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private expected to be basetypes.StringValue, was: %T`, privateAttribute)) + } + + privateEndpointAttribute, ok := attributes["private_endpoint"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_endpoint is missing from object`) + + return nil, diags + } + + privateEndpointVal, ok := privateEndpointAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_endpoint expected to be basetypes.ListValue, was: %T`, privateEndpointAttribute)) + } + + privateSrvAttribute, ok := attributes["private_srv"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_srv is missing from object`) + + return nil, diags + } + + privateSrvVal, ok := privateSrvAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_srv expected to be basetypes.StringValue, was: %T`, privateSrvAttribute)) + } + + standardAttribute, ok := attributes["standard"] + + if !ok { + diags.AddError( + "Attribute Missing", + `standard is missing from object`) + + return nil, diags + } + + standardVal, ok := standardAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`standard expected to be basetypes.StringValue, was: %T`, standardAttribute)) + } + + standardSrvAttribute, ok := attributes["standard_srv"] + + if !ok { + diags.AddError( + "Attribute Missing", + `standard_srv is missing from object`) + + return nil, diags + } + + standardSrvVal, ok := standardSrvAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`standard_srv expected to be basetypes.StringValue, was: %T`, standardSrvAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ConnectionStringsValue{ + AwsPrivateLink: awsPrivateLinkVal, + AwsPrivateLinkSrv: awsPrivateLinkSrvVal, + Private: privateVal, + PrivateEndpoint: privateEndpointVal, + PrivateSrv: privateSrvVal, + Standard: standardVal, + StandardSrv: standardSrvVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewConnectionStringsValueNull() ConnectionStringsValue { + return ConnectionStringsValue{ + state: attr.ValueStateNull, + } +} + +func NewConnectionStringsValueUnknown() ConnectionStringsValue { + return ConnectionStringsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewConnectionStringsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ConnectionStringsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + 
"Missing ConnectionStringsValue Attribute Value", + "While creating a ConnectionStringsValue value, a missing attribute value was detected. "+ + "A ConnectionStringsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ConnectionStringsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ConnectionStringsValue Attribute Type", + "While creating a ConnectionStringsValue value, an invalid attribute value was detected. "+ + "A ConnectionStringsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ConnectionStringsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ConnectionStringsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ConnectionStringsValue Attribute Value", + "While creating a ConnectionStringsValue value, an extra attribute value was detected. "+ + "A ConnectionStringsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ConnectionStringsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewConnectionStringsValueUnknown(), diags + } + + awsPrivateLinkAttribute, ok := attributes["aws_private_link"] + + if !ok { + diags.AddError( + "Attribute Missing", + `aws_private_link is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + awsPrivateLinkVal, ok := awsPrivateLinkAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`aws_private_link expected to be basetypes.MapValue, was: %T`, awsPrivateLinkAttribute)) + } + + awsPrivateLinkSrvAttribute, ok := attributes["aws_private_link_srv"] + + if !ok { + diags.AddError( + "Attribute Missing", + `aws_private_link_srv is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + awsPrivateLinkSrvVal, ok := awsPrivateLinkSrvAttribute.(basetypes.MapValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`aws_private_link_srv expected to be basetypes.MapValue, was: %T`, awsPrivateLinkSrvAttribute)) + } + + privateAttribute, ok := attributes["private"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + privateVal, ok := privateAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private expected to be basetypes.StringValue, was: %T`, privateAttribute)) + } + + privateEndpointAttribute, ok := attributes["private_endpoint"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_endpoint is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + privateEndpointVal, ok := privateEndpointAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_endpoint expected to be basetypes.ListValue, was: %T`, privateEndpointAttribute)) + } + + privateSrvAttribute, ok := 
attributes["private_srv"] + + if !ok { + diags.AddError( + "Attribute Missing", + `private_srv is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + privateSrvVal, ok := privateSrvAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`private_srv expected to be basetypes.StringValue, was: %T`, privateSrvAttribute)) + } + + standardAttribute, ok := attributes["standard"] + + if !ok { + diags.AddError( + "Attribute Missing", + `standard is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + standardVal, ok := standardAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`standard expected to be basetypes.StringValue, was: %T`, standardAttribute)) + } + + standardSrvAttribute, ok := attributes["standard_srv"] + + if !ok { + diags.AddError( + "Attribute Missing", + `standard_srv is missing from object`) + + return NewConnectionStringsValueUnknown(), diags + } + + standardSrvVal, ok := standardSrvAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`standard_srv expected to be basetypes.StringValue, was: %T`, standardSrvAttribute)) + } + + if diags.HasError() { + return NewConnectionStringsValueUnknown(), diags + } + + return ConnectionStringsValue{ + AwsPrivateLink: awsPrivateLinkVal, + AwsPrivateLinkSrv: awsPrivateLinkSrvVal, + Private: privateVal, + PrivateEndpoint: privateEndpointVal, + PrivateSrv: privateSrvVal, + Standard: standardVal, + StandardSrv: standardSrvVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewConnectionStringsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ConnectionStringsValue { + object, diags := NewConnectionStringsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewConnectionStringsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ConnectionStringsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewConnectionStringsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewConnectionStringsValueUnknown(), nil + } + + if in.IsNull() { + return NewConnectionStringsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewConnectionStringsValueMust(ConnectionStringsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ConnectionStringsType) ValueType(ctx context.Context) attr.Value { + return ConnectionStringsValue{} +} + +var _ basetypes.ObjectValuable = ConnectionStringsValue{} + +type ConnectionStringsValue struct { + AwsPrivateLink basetypes.MapValue `tfsdk:"aws_private_link"` + AwsPrivateLinkSrv basetypes.MapValue `tfsdk:"aws_private_link_srv"` + Private basetypes.StringValue `tfsdk:"private"` + PrivateEndpoint basetypes.ListValue `tfsdk:"private_endpoint"` + PrivateSrv basetypes.StringValue `tfsdk:"private_srv"` + Standard basetypes.StringValue `tfsdk:"standard"` + StandardSrv basetypes.StringValue `tfsdk:"standard_srv"` + state attr.ValueState +} + +func (v ConnectionStringsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 7) + + var val tftypes.Value + var err error + + attrTypes["aws_private_link"] = basetypes.MapType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["aws_private_link_srv"] = basetypes.MapType{ + ElemType: types.StringType, + }.TerraformType(ctx) + attrTypes["private"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["private_endpoint"] = basetypes.ListType{ + ElemType: PrivateEndpointValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["private_srv"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["standard"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["standard_srv"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 7) + + val, err = v.AwsPrivateLink.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["aws_private_link"] = val + + val, err = v.AwsPrivateLinkSrv.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["aws_private_link_srv"] = val + + val, err = v.Private.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["private"] = val + + val, err = v.PrivateEndpoint.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["private_endpoint"] = val + + 
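+		// The remaining connection string fields are converted to Terraform values in the same way below.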
val, err = v.PrivateSrv.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["private_srv"] = val + + val, err = v.Standard.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["standard"] = val + + val, err = v.StandardSrv.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["standard_srv"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ConnectionStringsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ConnectionStringsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ConnectionStringsValue) String() string { + return "ConnectionStringsValue" +} + +func (v ConnectionStringsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + privateEndpoint := types.ListValueMust( + PrivateEndpointType{ + basetypes.ObjectType{ + AttrTypes: PrivateEndpointValue{}.AttributeTypes(ctx), + }, + }, + v.PrivateEndpoint.Elements(), + ) + + if v.PrivateEndpoint.IsNull() { + privateEndpoint = types.ListNull( + PrivateEndpointType{ + basetypes.ObjectType{ + AttrTypes: PrivateEndpointValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.PrivateEndpoint.IsUnknown() { + privateEndpoint = types.ListUnknown( + PrivateEndpointType{ + basetypes.ObjectType{ + AttrTypes: PrivateEndpointValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + var awsPrivateLinkVal basetypes.MapValue + switch { + case v.AwsPrivateLink.IsUnknown(): + awsPrivateLinkVal = types.MapUnknown(types.StringType) + case v.AwsPrivateLink.IsNull(): + awsPrivateLinkVal = types.MapNull(types.StringType) + default: + var d diag.Diagnostics + awsPrivateLinkVal, d = types.MapValue(types.StringType, v.AwsPrivateLink.Elements()) + diags.Append(d...) + } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "aws_private_link": basetypes.MapType{ + ElemType: types.StringType, + }, + "aws_private_link_srv": basetypes.MapType{ + ElemType: types.StringType, + }, + "private": basetypes.StringType{}, + "private_endpoint": basetypes.ListType{ + ElemType: PrivateEndpointValue{}.Type(ctx), + }, + "private_srv": basetypes.StringType{}, + "standard": basetypes.StringType{}, + "standard_srv": basetypes.StringType{}, + }), diags + } + + var awsPrivateLinkSrvVal basetypes.MapValue + switch { + case v.AwsPrivateLinkSrv.IsUnknown(): + awsPrivateLinkSrvVal = types.MapUnknown(types.StringType) + case v.AwsPrivateLinkSrv.IsNull(): + awsPrivateLinkSrvVal = types.MapNull(types.StringType) + default: + var d diag.Diagnostics + awsPrivateLinkSrvVal, d = types.MapValue(types.StringType, v.AwsPrivateLinkSrv.Elements()) + diags.Append(d...) 
+ } + + if diags.HasError() { + return types.ObjectUnknown(map[string]attr.Type{ + "aws_private_link": basetypes.MapType{ + ElemType: types.StringType, + }, + "aws_private_link_srv": basetypes.MapType{ + ElemType: types.StringType, + }, + "private": basetypes.StringType{}, + "private_endpoint": basetypes.ListType{ + ElemType: PrivateEndpointValue{}.Type(ctx), + }, + "private_srv": basetypes.StringType{}, + "standard": basetypes.StringType{}, + "standard_srv": basetypes.StringType{}, + }), diags + } + + attributeTypes := map[string]attr.Type{ + "aws_private_link": basetypes.MapType{ + ElemType: types.StringType, + }, + "aws_private_link_srv": basetypes.MapType{ + ElemType: types.StringType, + }, + "private": basetypes.StringType{}, + "private_endpoint": basetypes.ListType{ + ElemType: PrivateEndpointValue{}.Type(ctx), + }, + "private_srv": basetypes.StringType{}, + "standard": basetypes.StringType{}, + "standard_srv": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "aws_private_link": awsPrivateLinkVal, + "aws_private_link_srv": awsPrivateLinkSrvVal, + "private": v.Private, + "private_endpoint": privateEndpoint, + "private_srv": v.PrivateSrv, + "standard": v.Standard, + "standard_srv": v.StandardSrv, + }) + + return objVal, diags +} + +func (v ConnectionStringsValue) Equal(o attr.Value) bool { + other, ok := o.(ConnectionStringsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.AwsPrivateLink.Equal(other.AwsPrivateLink) { + return false + } + + if !v.AwsPrivateLinkSrv.Equal(other.AwsPrivateLinkSrv) { + return false + } + + if !v.Private.Equal(other.Private) { + return false + } + + if !v.PrivateEndpoint.Equal(other.PrivateEndpoint) { + return false + } + + if !v.PrivateSrv.Equal(other.PrivateSrv) { + return false + } + + if !v.Standard.Equal(other.Standard) { + return false + } + + if !v.StandardSrv.Equal(other.StandardSrv) { + return false + } + + return true +} + +func (v ConnectionStringsValue) Type(ctx context.Context) attr.Type { + return ConnectionStringsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ConnectionStringsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "aws_private_link": basetypes.MapType{ + ElemType: types.StringType, + }, + "aws_private_link_srv": basetypes.MapType{ + ElemType: types.StringType, + }, + "private": basetypes.StringType{}, + "private_endpoint": basetypes.ListType{ + ElemType: PrivateEndpointValue{}.Type(ctx), + }, + "private_srv": basetypes.StringType{}, + "standard": basetypes.StringType{}, + "standard_srv": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = PrivateEndpointType{} + +type PrivateEndpointType struct { + basetypes.ObjectType +} + +func (t PrivateEndpointType) Equal(o attr.Type) bool { + other, ok := o.(PrivateEndpointType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t PrivateEndpointType) String() string { + return "PrivateEndpointType" +} + +func (t PrivateEndpointType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + 
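+	// Check that every expected attribute is present and has the expected base type before assembling the PrivateEndpointValue.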
connectionStringAttribute, ok := attributes["connection_string"] + + if !ok { + diags.AddError( + "Attribute Missing", + `connection_string is missing from object`) + + return nil, diags + } + + connectionStringVal, ok := connectionStringAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`connection_string expected to be basetypes.StringValue, was: %T`, connectionStringAttribute)) + } + + endpointsAttribute, ok := attributes["endpoints"] + + if !ok { + diags.AddError( + "Attribute Missing", + `endpoints is missing from object`) + + return nil, diags + } + + endpointsVal, ok := endpointsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`endpoints expected to be basetypes.ListValue, was: %T`, endpointsAttribute)) + } + + srvConnectionStringAttribute, ok := attributes["srv_connection_string"] + + if !ok { + diags.AddError( + "Attribute Missing", + `srv_connection_string is missing from object`) + + return nil, diags + } + + srvConnectionStringVal, ok := srvConnectionStringAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`srv_connection_string expected to be basetypes.StringValue, was: %T`, srvConnectionStringAttribute)) + } + + srvShardOptimizedConnectionStringAttribute, ok := attributes["srv_shard_optimized_connection_string"] + + if !ok { + diags.AddError( + "Attribute Missing", + `srv_shard_optimized_connection_string is missing from object`) + + return nil, diags + } + + srvShardOptimizedConnectionStringVal, ok := srvShardOptimizedConnectionStringAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`srv_shard_optimized_connection_string expected to be basetypes.StringValue, was: %T`, srvShardOptimizedConnectionStringAttribute)) + } + + typeAttribute, ok := attributes["type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `type is missing from object`) + + return nil, diags + } + + typeVal, ok := typeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`type expected to be basetypes.StringValue, was: %T`, typeAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return PrivateEndpointValue{ + ConnectionString: connectionStringVal, + Endpoints: endpointsVal, + SrvConnectionString: srvConnectionStringVal, + SrvShardOptimizedConnectionString: srvShardOptimizedConnectionStringVal, + PrivateEndpointType: typeVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewPrivateEndpointValueNull() PrivateEndpointValue { + return PrivateEndpointValue{ + state: attr.ValueStateNull, + } +} + +func NewPrivateEndpointValueUnknown() PrivateEndpointValue { + return PrivateEndpointValue{ + state: attr.ValueStateUnknown, + } +} + +func NewPrivateEndpointValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (PrivateEndpointValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing PrivateEndpointValue Attribute Value", + "While creating a PrivateEndpointValue value, a missing attribute value was detected. "+ + "A PrivateEndpointValue must contain values for all attributes, even if null or unknown. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("PrivateEndpointValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid PrivateEndpointValue Attribute Type", + "While creating a PrivateEndpointValue value, an invalid attribute value was detected. "+ + "A PrivateEndpointValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("PrivateEndpointValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("PrivateEndpointValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra PrivateEndpointValue Attribute Value", + "While creating a PrivateEndpointValue value, an extra attribute value was detected. "+ + "A PrivateEndpointValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra PrivateEndpointValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewPrivateEndpointValueUnknown(), diags + } + + connectionStringAttribute, ok := attributes["connection_string"] + + if !ok { + diags.AddError( + "Attribute Missing", + `connection_string is missing from object`) + + return NewPrivateEndpointValueUnknown(), diags + } + + connectionStringVal, ok := connectionStringAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`connection_string expected to be basetypes.StringValue, was: %T`, connectionStringAttribute)) + } + + endpointsAttribute, ok := attributes["endpoints"] + + if !ok { + diags.AddError( + "Attribute Missing", + `endpoints is missing from object`) + + return NewPrivateEndpointValueUnknown(), diags + } + + endpointsVal, ok := endpointsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`endpoints expected to be basetypes.ListValue, was: %T`, endpointsAttribute)) + } + + srvConnectionStringAttribute, ok := attributes["srv_connection_string"] + + if !ok { + diags.AddError( + "Attribute Missing", + `srv_connection_string is missing from object`) + + return NewPrivateEndpointValueUnknown(), diags + } + + srvConnectionStringVal, ok := srvConnectionStringAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`srv_connection_string expected to be basetypes.StringValue, was: %T`, srvConnectionStringAttribute)) + } + + srvShardOptimizedConnectionStringAttribute, ok := attributes["srv_shard_optimized_connection_string"] + + if !ok { + diags.AddError( + "Attribute Missing", + `srv_shard_optimized_connection_string is missing from object`) + + return NewPrivateEndpointValueUnknown(), diags + } + + srvShardOptimizedConnectionStringVal, ok := srvShardOptimizedConnectionStringAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`srv_shard_optimized_connection_string expected to be basetypes.StringValue, was: %T`, srvShardOptimizedConnectionStringAttribute)) + } + + typeAttribute, ok := attributes["type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `type is missing from 
object`) + + return NewPrivateEndpointValueUnknown(), diags + } + + typeVal, ok := typeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`type expected to be basetypes.StringValue, was: %T`, typeAttribute)) + } + + if diags.HasError() { + return NewPrivateEndpointValueUnknown(), diags + } + + return PrivateEndpointValue{ + ConnectionString: connectionStringVal, + Endpoints: endpointsVal, + SrvConnectionString: srvConnectionStringVal, + SrvShardOptimizedConnectionString: srvShardOptimizedConnectionStringVal, + PrivateEndpointType: typeVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewPrivateEndpointValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) PrivateEndpointValue { + object, diags := NewPrivateEndpointValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewPrivateEndpointValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t PrivateEndpointType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewPrivateEndpointValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewPrivateEndpointValueUnknown(), nil + } + + if in.IsNull() { + return NewPrivateEndpointValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewPrivateEndpointValueMust(PrivateEndpointValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t PrivateEndpointType) ValueType(ctx context.Context) attr.Value { + return PrivateEndpointValue{} +} + +var _ basetypes.ObjectValuable = PrivateEndpointValue{} + +type PrivateEndpointValue struct { + ConnectionString basetypes.StringValue `tfsdk:"connection_string"` + Endpoints basetypes.ListValue `tfsdk:"endpoints"` + SrvConnectionString basetypes.StringValue `tfsdk:"srv_connection_string"` + SrvShardOptimizedConnectionString basetypes.StringValue `tfsdk:"srv_shard_optimized_connection_string"` + PrivateEndpointType basetypes.StringValue `tfsdk:"type"` + state attr.ValueState +} + +func (v PrivateEndpointValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["connection_string"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["endpoints"] = basetypes.ListType{ + ElemType: EndpointsValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["srv_connection_string"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["srv_shard_optimized_connection_string"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["type"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = 
v.ConnectionString.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["connection_string"] = val + + val, err = v.Endpoints.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["endpoints"] = val + + val, err = v.SrvConnectionString.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["srv_connection_string"] = val + + val, err = v.SrvShardOptimizedConnectionString.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["srv_shard_optimized_connection_string"] = val + + val, err = v.PrivateEndpointType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["type"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v PrivateEndpointValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v PrivateEndpointValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v PrivateEndpointValue) String() string { + return "PrivateEndpointValue" +} + +func (v PrivateEndpointValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + endpoints := types.ListValueMust( + EndpointsType{ + basetypes.ObjectType{ + AttrTypes: EndpointsValue{}.AttributeTypes(ctx), + }, + }, + v.Endpoints.Elements(), + ) + + if v.Endpoints.IsNull() { + endpoints = types.ListNull( + EndpointsType{ + basetypes.ObjectType{ + AttrTypes: EndpointsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Endpoints.IsUnknown() { + endpoints = types.ListUnknown( + EndpointsType{ + basetypes.ObjectType{ + AttrTypes: EndpointsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "connection_string": basetypes.StringType{}, + "endpoints": basetypes.ListType{ + ElemType: EndpointsValue{}.Type(ctx), + }, + "srv_connection_string": basetypes.StringType{}, + "srv_shard_optimized_connection_string": basetypes.StringType{}, + "type": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "connection_string": v.ConnectionString, + "endpoints": endpoints, + "srv_connection_string": v.SrvConnectionString, + "srv_shard_optimized_connection_string": v.SrvShardOptimizedConnectionString, + "type": v.PrivateEndpointType, + }) + + return objVal, diags +} + +func (v PrivateEndpointValue) Equal(o attr.Value) bool { + other, ok := o.(PrivateEndpointValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.ConnectionString.Equal(other.ConnectionString) { + return false + } + + if !v.Endpoints.Equal(other.Endpoints) { + return false + } + + if 
!v.SrvConnectionString.Equal(other.SrvConnectionString) { + return false + } + + if !v.SrvShardOptimizedConnectionString.Equal(other.SrvShardOptimizedConnectionString) { + return false + } + + if !v.PrivateEndpointType.Equal(other.PrivateEndpointType) { + return false + } + + return true +} + +func (v PrivateEndpointValue) Type(ctx context.Context) attr.Type { + return PrivateEndpointType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v PrivateEndpointValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "connection_string": basetypes.StringType{}, + "endpoints": basetypes.ListType{ + ElemType: EndpointsValue{}.Type(ctx), + }, + "srv_connection_string": basetypes.StringType{}, + "srv_shard_optimized_connection_string": basetypes.StringType{}, + "type": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = EndpointsType{} + +type EndpointsType struct { + basetypes.ObjectType +} + +func (t EndpointsType) Equal(o attr.Type) bool { + other, ok := o.(EndpointsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t EndpointsType) String() string { + return "EndpointsType" +} + +func (t EndpointsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + endpointIdAttribute, ok := attributes["endpoint_id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `endpoint_id is missing from object`) + + return nil, diags + } + + endpointIdVal, ok := endpointIdAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`endpoint_id expected to be basetypes.StringValue, was: %T`, endpointIdAttribute)) + } + + providerNameAttribute, ok := attributes["provider_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `provider_name is missing from object`) + + return nil, diags + } + + providerNameVal, ok := providerNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`provider_name expected to be basetypes.StringValue, was: %T`, providerNameAttribute)) + } + + regionAttribute, ok := attributes["region"] + + if !ok { + diags.AddError( + "Attribute Missing", + `region is missing from object`) + + return nil, diags + } + + regionVal, ok := regionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`region expected to be basetypes.StringValue, was: %T`, regionAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return EndpointsValue{ + EndpointId: endpointIdVal, + ProviderName: providerNameVal, + Region: regionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewEndpointsValueNull() EndpointsValue { + return EndpointsValue{ + state: attr.ValueStateNull, + } +} + +func NewEndpointsValueUnknown() EndpointsValue { + return EndpointsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewEndpointsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (EndpointsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing EndpointsValue Attribute Value", + "While creating a EndpointsValue value, a missing attribute 
value was detected. "+ + "A EndpointsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("EndpointsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid EndpointsValue Attribute Type", + "While creating a EndpointsValue value, an invalid attribute value was detected. "+ + "A EndpointsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("EndpointsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("EndpointsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra EndpointsValue Attribute Value", + "While creating a EndpointsValue value, an extra attribute value was detected. "+ + "A EndpointsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra EndpointsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewEndpointsValueUnknown(), diags + } + + endpointIdAttribute, ok := attributes["endpoint_id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `endpoint_id is missing from object`) + + return NewEndpointsValueUnknown(), diags + } + + endpointIdVal, ok := endpointIdAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`endpoint_id expected to be basetypes.StringValue, was: %T`, endpointIdAttribute)) + } + + providerNameAttribute, ok := attributes["provider_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `provider_name is missing from object`) + + return NewEndpointsValueUnknown(), diags + } + + providerNameVal, ok := providerNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`provider_name expected to be basetypes.StringValue, was: %T`, providerNameAttribute)) + } + + regionAttribute, ok := attributes["region"] + + if !ok { + diags.AddError( + "Attribute Missing", + `region is missing from object`) + + return NewEndpointsValueUnknown(), diags + } + + regionVal, ok := regionAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`region expected to be basetypes.StringValue, was: %T`, regionAttribute)) + } + + if diags.HasError() { + return NewEndpointsValueUnknown(), diags + } + + return EndpointsValue{ + EndpointId: endpointIdVal, + ProviderName: providerNameVal, + Region: regionVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewEndpointsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) EndpointsValue { + object, diags := NewEndpointsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewEndpointsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t EndpointsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewEndpointsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewEndpointsValueUnknown(), nil + } + + if in.IsNull() { + return NewEndpointsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewEndpointsValueMust(EndpointsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t EndpointsType) ValueType(ctx context.Context) attr.Value { + return EndpointsValue{} +} + +var _ basetypes.ObjectValuable = EndpointsValue{} + +type EndpointsValue struct { + EndpointId basetypes.StringValue `tfsdk:"endpoint_id"` + ProviderName basetypes.StringValue `tfsdk:"provider_name"` + Region basetypes.StringValue `tfsdk:"region"` + state attr.ValueState +} + +func (v EndpointsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 3) + + var val tftypes.Value + var err error + + attrTypes["endpoint_id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["provider_name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["region"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 3) + + val, err = v.EndpointId.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["endpoint_id"] = val + + val, err = v.ProviderName.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["provider_name"] = val + + val, err = v.Region.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["region"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v EndpointsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v EndpointsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v EndpointsValue) String() string { + return "EndpointsValue" +} + +func (v EndpointsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "endpoint_id": basetypes.StringType{}, + "provider_name": 
basetypes.StringType{}, + "region": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "endpoint_id": v.EndpointId, + "provider_name": v.ProviderName, + "region": v.Region, + }) + + return objVal, diags +} + +func (v EndpointsValue) Equal(o attr.Value) bool { + other, ok := o.(EndpointsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.EndpointId.Equal(other.EndpointId) { + return false + } + + if !v.ProviderName.Equal(other.ProviderName) { + return false + } + + if !v.Region.Equal(other.Region) { + return false + } + + return true +} + +func (v EndpointsValue) Type(ctx context.Context) attr.Type { + return EndpointsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v EndpointsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "endpoint_id": basetypes.StringType{}, + "provider_name": basetypes.StringType{}, + "region": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = LabelsType{} + +type LabelsType struct { + basetypes.ObjectType +} + +func (t LabelsType) Equal(o attr.Type) bool { + other, ok := o.(LabelsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t LabelsType) String() string { + return "LabelsType" +} + +func (t LabelsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return nil, diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return nil, diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return LabelsValue{ + Key: keyVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewLabelsValueNull() LabelsValue { + return LabelsValue{ + state: attr.ValueStateNull, + } +} + +func NewLabelsValueUnknown() LabelsValue { + return LabelsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewLabelsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (LabelsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing LabelsValue Attribute Value", + "While creating a LabelsValue value, a missing attribute value was detected. "+ + "A LabelsValue must contain values for all attributes, even if null or unknown. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("LabelsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid LabelsValue Attribute Type", + "While creating a LabelsValue value, an invalid attribute value was detected. "+ + "A LabelsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("LabelsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("LabelsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra LabelsValue Attribute Value", + "While creating a LabelsValue value, an extra attribute value was detected. "+ + "A LabelsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra LabelsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewLabelsValueUnknown(), diags + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return NewLabelsValueUnknown(), diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return NewLabelsValueUnknown(), diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return NewLabelsValueUnknown(), diags + } + + return LabelsValue{ + Key: keyVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewLabelsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) LabelsValue { + object, diags := NewLabelsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewLabelsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t LabelsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewLabelsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewLabelsValueUnknown(), nil + } + + if in.IsNull() { + return NewLabelsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewLabelsValueMust(LabelsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t LabelsType) ValueType(ctx context.Context) attr.Value { + return LabelsValue{} +} + +var _ basetypes.ObjectValuable = LabelsValue{} + +type LabelsValue struct { + Key basetypes.StringValue `tfsdk:"key"` + Value basetypes.StringValue `tfsdk:"value"` + state attr.ValueState +} + +func (v LabelsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["key"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["value"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Key.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["key"] = val + + val, err = v.Value.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["value"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v LabelsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v LabelsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v LabelsValue) String() string { + return "LabelsValue" +} + +func (v LabelsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "key": basetypes.StringType{}, + "value": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "key": v.Key, + "value": v.Value, + }) + + return objVal, diags +} + +func (v LabelsValue) Equal(o attr.Value) bool { + other, ok := o.(LabelsValue) + + if !ok { 
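+		// A value of a different concrete type is never equal.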
+ return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Key.Equal(other.Key) { + return false + } + + if !v.Value.Equal(other.Value) { + return false + } + + return true +} + +func (v LabelsValue) Type(ctx context.Context) attr.Type { + return LabelsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v LabelsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "key": basetypes.StringType{}, + "value": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = LinksType{} + +type LinksType struct { + basetypes.ObjectType +} + +func (t LinksType) Equal(o attr.Type) bool { + other, ok := o.(LinksType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t LinksType) String() string { + return "LinksType" +} + +func (t LinksType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + hrefAttribute, ok := attributes["href"] + + if !ok { + diags.AddError( + "Attribute Missing", + `href is missing from object`) + + return nil, diags + } + + hrefVal, ok := hrefAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`href expected to be basetypes.StringValue, was: %T`, hrefAttribute)) + } + + relAttribute, ok := attributes["rel"] + + if !ok { + diags.AddError( + "Attribute Missing", + `rel is missing from object`) + + return nil, diags + } + + relVal, ok := relAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`rel expected to be basetypes.StringValue, was: %T`, relAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return LinksValue{ + Href: hrefVal, + Rel: relVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewLinksValueNull() LinksValue { + return LinksValue{ + state: attr.ValueStateNull, + } +} + +func NewLinksValueUnknown() LinksValue { + return LinksValue{ + state: attr.ValueStateUnknown, + } +} + +func NewLinksValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (LinksValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing LinksValue Attribute Value", + "While creating a LinksValue value, a missing attribute value was detected. "+ + "A LinksValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("LinksValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid LinksValue Attribute Type", + "While creating a LinksValue value, an invalid attribute value was detected. "+ + "A LinksValue must use a matching attribute type for the value. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("LinksValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("LinksValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra LinksValue Attribute Value", + "While creating a LinksValue value, an extra attribute value was detected. "+ + "A LinksValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra LinksValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewLinksValueUnknown(), diags + } + + hrefAttribute, ok := attributes["href"] + + if !ok { + diags.AddError( + "Attribute Missing", + `href is missing from object`) + + return NewLinksValueUnknown(), diags + } + + hrefVal, ok := hrefAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`href expected to be basetypes.StringValue, was: %T`, hrefAttribute)) + } + + relAttribute, ok := attributes["rel"] + + if !ok { + diags.AddError( + "Attribute Missing", + `rel is missing from object`) + + return NewLinksValueUnknown(), diags + } + + relVal, ok := relAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`rel expected to be basetypes.StringValue, was: %T`, relAttribute)) + } + + if diags.HasError() { + return NewLinksValueUnknown(), diags + } + + return LinksValue{ + Href: hrefVal, + Rel: relVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewLinksValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) LinksValue { + object, diags := NewLinksValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewLinksValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t LinksType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewLinksValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewLinksValueUnknown(), nil + } + + if in.IsNull() { + return NewLinksValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewLinksValueMust(LinksValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t LinksType) ValueType(ctx context.Context) attr.Value { + return LinksValue{} +} + +var _ basetypes.ObjectValuable = LinksValue{} + +type LinksValue struct { + Href basetypes.StringValue `tfsdk:"href"` + Rel basetypes.StringValue `tfsdk:"rel"` + state attr.ValueState +} + +func (v LinksValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["href"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["rel"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Href.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["href"] = val + + val, err = v.Rel.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["rel"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v LinksValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v LinksValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v LinksValue) String() string { + return "LinksValue" +} + +func (v LinksValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "href": basetypes.StringType{}, + "rel": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "href": v.Href, + "rel": v.Rel, + }) + + return objVal, diags +} + +func (v LinksValue) Equal(o attr.Value) bool { + other, ok := o.(LinksValue) + + if !ok { + return false + } + + if 
v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Href.Equal(other.Href) { + return false + } + + if !v.Rel.Equal(other.Rel) { + return false + } + + return true +} + +func (v LinksValue) Type(ctx context.Context) attr.Type { + return LinksType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v LinksValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "href": basetypes.StringType{}, + "rel": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = MongoDbemployeeAccessGrantType{} + +type MongoDbemployeeAccessGrantType struct { + basetypes.ObjectType +} + +func (t MongoDbemployeeAccessGrantType) Equal(o attr.Type) bool { + other, ok := o.(MongoDbemployeeAccessGrantType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t MongoDbemployeeAccessGrantType) String() string { + return "MongoDbemployeeAccessGrantType" +} + +func (t MongoDbemployeeAccessGrantType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + expirationTimeAttribute, ok := attributes["expiration_time"] + + if !ok { + diags.AddError( + "Attribute Missing", + `expiration_time is missing from object`) + + return nil, diags + } + + expirationTimeVal, ok := expirationTimeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`expiration_time expected to be basetypes.StringValue, was: %T`, expirationTimeAttribute)) + } + + grantTypeAttribute, ok := attributes["grant_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `grant_type is missing from object`) + + return nil, diags + } + + grantTypeVal, ok := grantTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`grant_type expected to be basetypes.StringValue, was: %T`, grantTypeAttribute)) + } + + linksAttribute, ok := attributes["links"] + + if !ok { + diags.AddError( + "Attribute Missing", + `links is missing from object`) + + return nil, diags + } + + linksVal, ok := linksAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`links expected to be basetypes.ListValue, was: %T`, linksAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return MongoDbemployeeAccessGrantValue{ + ExpirationTime: expirationTimeVal, + GrantType: grantTypeVal, + Links: linksVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewMongoDbemployeeAccessGrantValueNull() MongoDbemployeeAccessGrantValue { + return MongoDbemployeeAccessGrantValue{ + state: attr.ValueStateNull, + } +} + +func NewMongoDbemployeeAccessGrantValueUnknown() MongoDbemployeeAccessGrantValue { + return MongoDbemployeeAccessGrantValue{ + state: attr.ValueStateUnknown, + } +} + +func NewMongoDbemployeeAccessGrantValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (MongoDbemployeeAccessGrantValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing MongoDbemployeeAccessGrantValue Attribute Value", + "While creating a MongoDbemployeeAccessGrantValue value, a missing attribute 
value was detected. "+ + "A MongoDbemployeeAccessGrantValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("MongoDbemployeeAccessGrantValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid MongoDbemployeeAccessGrantValue Attribute Type", + "While creating a MongoDbemployeeAccessGrantValue value, an invalid attribute value was detected. "+ + "A MongoDbemployeeAccessGrantValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("MongoDbemployeeAccessGrantValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("MongoDbemployeeAccessGrantValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra MongoDbemployeeAccessGrantValue Attribute Value", + "While creating a MongoDbemployeeAccessGrantValue value, an extra attribute value was detected. "+ + "A MongoDbemployeeAccessGrantValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra MongoDbemployeeAccessGrantValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewMongoDbemployeeAccessGrantValueUnknown(), diags + } + + expirationTimeAttribute, ok := attributes["expiration_time"] + + if !ok { + diags.AddError( + "Attribute Missing", + `expiration_time is missing from object`) + + return NewMongoDbemployeeAccessGrantValueUnknown(), diags + } + + expirationTimeVal, ok := expirationTimeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`expiration_time expected to be basetypes.StringValue, was: %T`, expirationTimeAttribute)) + } + + grantTypeAttribute, ok := attributes["grant_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `grant_type is missing from object`) + + return NewMongoDbemployeeAccessGrantValueUnknown(), diags + } + + grantTypeVal, ok := grantTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`grant_type expected to be basetypes.StringValue, was: %T`, grantTypeAttribute)) + } + + linksAttribute, ok := attributes["links"] + + if !ok { + diags.AddError( + "Attribute Missing", + `links is missing from object`) + + return NewMongoDbemployeeAccessGrantValueUnknown(), diags + } + + linksVal, ok := linksAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`links expected to be basetypes.ListValue, was: %T`, linksAttribute)) + } + + if diags.HasError() { + return NewMongoDbemployeeAccessGrantValueUnknown(), diags + } + + return MongoDbemployeeAccessGrantValue{ + ExpirationTime: expirationTimeVal, + GrantType: grantTypeVal, + Links: linksVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewMongoDbemployeeAccessGrantValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) MongoDbemployeeAccessGrantValue { + object, diags := NewMongoDbemployeeAccessGrantValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to 
the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewMongoDbemployeeAccessGrantValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t MongoDbemployeeAccessGrantType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewMongoDbemployeeAccessGrantValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewMongoDbemployeeAccessGrantValueUnknown(), nil + } + + if in.IsNull() { + return NewMongoDbemployeeAccessGrantValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewMongoDbemployeeAccessGrantValueMust(MongoDbemployeeAccessGrantValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t MongoDbemployeeAccessGrantType) ValueType(ctx context.Context) attr.Value { + return MongoDbemployeeAccessGrantValue{} +} + +var _ basetypes.ObjectValuable = MongoDbemployeeAccessGrantValue{} + +type MongoDbemployeeAccessGrantValue struct { + ExpirationTime basetypes.StringValue `tfsdk:"expiration_time"` + GrantType basetypes.StringValue `tfsdk:"grant_type"` + Links basetypes.ListValue `tfsdk:"links"` + state attr.ValueState +} + +func (v MongoDbemployeeAccessGrantValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 3) + + var val tftypes.Value + var err error + + attrTypes["expiration_time"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["grant_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["links"] = basetypes.ListType{ + ElemType: LinksValue{}.Type(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 3) + + val, err = v.ExpirationTime.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["expiration_time"] = val + + val, err = v.GrantType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["grant_type"] = val + + val, err = v.Links.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["links"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v MongoDbemployeeAccessGrantValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v MongoDbemployeeAccessGrantValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v 
MongoDbemployeeAccessGrantValue) String() string { + return "MongoDbemployeeAccessGrantValue" +} + +func (v MongoDbemployeeAccessGrantValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + links := types.ListValueMust( + LinksType{ + basetypes.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + v.Links.Elements(), + ) + + if v.Links.IsNull() { + links = types.ListNull( + LinksType{ + basetypes.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.Links.IsUnknown() { + links = types.ListUnknown( + LinksType{ + basetypes.ObjectType{ + AttrTypes: LinksValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "expiration_time": basetypes.StringType{}, + "grant_type": basetypes.StringType{}, + "links": basetypes.ListType{ + ElemType: LinksValue{}.Type(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "expiration_time": v.ExpirationTime, + "grant_type": v.GrantType, + "links": links, + }) + + return objVal, diags +} + +func (v MongoDbemployeeAccessGrantValue) Equal(o attr.Value) bool { + other, ok := o.(MongoDbemployeeAccessGrantValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.ExpirationTime.Equal(other.ExpirationTime) { + return false + } + + if !v.GrantType.Equal(other.GrantType) { + return false + } + + if !v.Links.Equal(other.Links) { + return false + } + + return true +} + +func (v MongoDbemployeeAccessGrantValue) Type(ctx context.Context) attr.Type { + return MongoDbemployeeAccessGrantType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v MongoDbemployeeAccessGrantValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "expiration_time": basetypes.StringType{}, + "grant_type": basetypes.StringType{}, + "links": basetypes.ListType{ + ElemType: LinksValue{}.Type(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = ReplicationSpecsType{} + +type ReplicationSpecsType struct { + basetypes.ObjectType +} + +func (t ReplicationSpecsType) Equal(o attr.Type) bool { + other, ok := o.(ReplicationSpecsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ReplicationSpecsType) String() string { + return "ReplicationSpecsType" +} + +func (t ReplicationSpecsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return nil, diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + regionConfigsAttribute, ok := attributes["region_configs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `region_configs is missing from object`) + + return nil, diags + } + + regionConfigsVal, ok := regionConfigsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`region_configs expected to be 
basetypes.ListValue, was: %T`, regionConfigsAttribute)) + } + + zoneIdAttribute, ok := attributes["zone_id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `zone_id is missing from object`) + + return nil, diags + } + + zoneIdVal, ok := zoneIdAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`zone_id expected to be basetypes.StringValue, was: %T`, zoneIdAttribute)) + } + + zoneNameAttribute, ok := attributes["zone_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `zone_name is missing from object`) + + return nil, diags + } + + zoneNameVal, ok := zoneNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`zone_name expected to be basetypes.StringValue, was: %T`, zoneNameAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ReplicationSpecsValue{ + Id: idVal, + RegionConfigs: regionConfigsVal, + ZoneId: zoneIdVal, + ZoneName: zoneNameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewReplicationSpecsValueNull() ReplicationSpecsValue { + return ReplicationSpecsValue{ + state: attr.ValueStateNull, + } +} + +func NewReplicationSpecsValueUnknown() ReplicationSpecsValue { + return ReplicationSpecsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewReplicationSpecsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ReplicationSpecsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ReplicationSpecsValue Attribute Value", + "While creating a ReplicationSpecsValue value, a missing attribute value was detected. "+ + "A ReplicationSpecsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ReplicationSpecsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ReplicationSpecsValue Attribute Type", + "While creating a ReplicationSpecsValue value, an invalid attribute value was detected. "+ + "A ReplicationSpecsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ReplicationSpecsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ReplicationSpecsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ReplicationSpecsValue Attribute Value", + "While creating a ReplicationSpecsValue value, an extra attribute value was detected. "+ + "A ReplicationSpecsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ReplicationSpecsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewReplicationSpecsValueUnknown(), diags + } + + idAttribute, ok := attributes["id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `id is missing from object`) + + return NewReplicationSpecsValueUnknown(), diags + } + + idVal, ok := idAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`id expected to be basetypes.StringValue, was: %T`, idAttribute)) + } + + regionConfigsAttribute, ok := attributes["region_configs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `region_configs is missing from object`) + + return NewReplicationSpecsValueUnknown(), diags + } + + regionConfigsVal, ok := regionConfigsAttribute.(basetypes.ListValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`region_configs expected to be basetypes.ListValue, was: %T`, regionConfigsAttribute)) + } + + zoneIdAttribute, ok := attributes["zone_id"] + + if !ok { + diags.AddError( + "Attribute Missing", + `zone_id is missing from object`) + + return NewReplicationSpecsValueUnknown(), diags + } + + zoneIdVal, ok := zoneIdAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`zone_id expected to be basetypes.StringValue, was: %T`, zoneIdAttribute)) + } + + zoneNameAttribute, ok := attributes["zone_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `zone_name is missing from object`) + + return NewReplicationSpecsValueUnknown(), diags + } + + zoneNameVal, ok := zoneNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`zone_name expected to be basetypes.StringValue, was: %T`, zoneNameAttribute)) + } + + if diags.HasError() { + return NewReplicationSpecsValueUnknown(), diags + } + + return ReplicationSpecsValue{ + Id: idVal, + RegionConfigs: regionConfigsVal, + ZoneId: zoneIdVal, + ZoneName: zoneNameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewReplicationSpecsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ReplicationSpecsValue { + object, diags := NewReplicationSpecsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewReplicationSpecsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ReplicationSpecsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewReplicationSpecsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewReplicationSpecsValueUnknown(), nil + } + + if in.IsNull() { + return NewReplicationSpecsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewReplicationSpecsValueMust(ReplicationSpecsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ReplicationSpecsType) ValueType(ctx context.Context) attr.Value { + return ReplicationSpecsValue{} +} + +var _ basetypes.ObjectValuable = ReplicationSpecsValue{} + +type ReplicationSpecsValue struct { + Id basetypes.StringValue `tfsdk:"id"` + RegionConfigs basetypes.ListValue `tfsdk:"region_configs"` + ZoneId basetypes.StringValue `tfsdk:"zone_id"` + ZoneName basetypes.StringValue `tfsdk:"zone_name"` + state attr.ValueState +} + +func (v ReplicationSpecsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 4) + + var val tftypes.Value + var err error + + attrTypes["id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["region_configs"] = basetypes.ListType{ + ElemType: RegionConfigsValue{}.Type(ctx), + }.TerraformType(ctx) + attrTypes["zone_id"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["zone_name"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 4) + + val, err = v.Id.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["id"] = val + + val, err = v.RegionConfigs.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["region_configs"] = val + + val, err = v.ZoneId.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["zone_id"] = val + + val, err = v.ZoneName.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["zone_name"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ReplicationSpecsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v 
ReplicationSpecsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ReplicationSpecsValue) String() string { + return "ReplicationSpecsValue" +} + +func (v ReplicationSpecsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + regionConfigs := types.ListValueMust( + RegionConfigsType{ + basetypes.ObjectType{ + AttrTypes: RegionConfigsValue{}.AttributeTypes(ctx), + }, + }, + v.RegionConfigs.Elements(), + ) + + if v.RegionConfigs.IsNull() { + regionConfigs = types.ListNull( + RegionConfigsType{ + basetypes.ObjectType{ + AttrTypes: RegionConfigsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + if v.RegionConfigs.IsUnknown() { + regionConfigs = types.ListUnknown( + RegionConfigsType{ + basetypes.ObjectType{ + AttrTypes: RegionConfigsValue{}.AttributeTypes(ctx), + }, + }, + ) + } + + attributeTypes := map[string]attr.Type{ + "id": basetypes.StringType{}, + "region_configs": basetypes.ListType{ + ElemType: RegionConfigsValue{}.Type(ctx), + }, + "zone_id": basetypes.StringType{}, + "zone_name": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "id": v.Id, + "region_configs": regionConfigs, + "zone_id": v.ZoneId, + "zone_name": v.ZoneName, + }) + + return objVal, diags +} + +func (v ReplicationSpecsValue) Equal(o attr.Value) bool { + other, ok := o.(ReplicationSpecsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Id.Equal(other.Id) { + return false + } + + if !v.RegionConfigs.Equal(other.RegionConfigs) { + return false + } + + if !v.ZoneId.Equal(other.ZoneId) { + return false + } + + if !v.ZoneName.Equal(other.ZoneName) { + return false + } + + return true +} + +func (v ReplicationSpecsValue) Type(ctx context.Context) attr.Type { + return ReplicationSpecsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ReplicationSpecsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "id": basetypes.StringType{}, + "region_configs": basetypes.ListType{ + ElemType: RegionConfigsValue{}.Type(ctx), + }, + "zone_id": basetypes.StringType{}, + "zone_name": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = RegionConfigsType{} + +type RegionConfigsType struct { + basetypes.ObjectType +} + +func (t RegionConfigsType) Equal(o attr.Type) bool { + other, ok := o.(RegionConfigsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t RegionConfigsType) String() string { + return "RegionConfigsType" +} + +func (t RegionConfigsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + analyticsAutoScalingAttribute, ok := attributes["analytics_auto_scaling"] + + if !ok { + diags.AddError( + "Attribute Missing", + `analytics_auto_scaling is missing from object`) + + return nil, diags + } + + analyticsAutoScalingVal, ok := analyticsAutoScalingAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`analytics_auto_scaling expected to be basetypes.ObjectValue, was: %T`, analyticsAutoScalingAttribute)) + } 
+ + analyticsSpecsAttribute, ok := attributes["analytics_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `analytics_specs is missing from object`) + + return nil, diags + } + + analyticsSpecsVal, ok := analyticsSpecsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`analytics_specs expected to be basetypes.ObjectValue, was: %T`, analyticsSpecsAttribute)) + } + + autoScalingAttribute, ok := attributes["auto_scaling"] + + if !ok { + diags.AddError( + "Attribute Missing", + `auto_scaling is missing from object`) + + return nil, diags + } + + autoScalingVal, ok := autoScalingAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`auto_scaling expected to be basetypes.ObjectValue, was: %T`, autoScalingAttribute)) + } + + backingProviderNameAttribute, ok := attributes["backing_provider_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `backing_provider_name is missing from object`) + + return nil, diags + } + + backingProviderNameVal, ok := backingProviderNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`backing_provider_name expected to be basetypes.StringValue, was: %T`, backingProviderNameAttribute)) + } + + electableSpecsAttribute, ok := attributes["electable_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `electable_specs is missing from object`) + + return nil, diags + } + + electableSpecsVal, ok := electableSpecsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`electable_specs expected to be basetypes.ObjectValue, was: %T`, electableSpecsAttribute)) + } + + priorityAttribute, ok := attributes["priority"] + + if !ok { + diags.AddError( + "Attribute Missing", + `priority is missing from object`) + + return nil, diags + } + + priorityVal, ok := priorityAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`priority expected to be basetypes.Int64Value, was: %T`, priorityAttribute)) + } + + providerNameAttribute, ok := attributes["provider_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `provider_name is missing from object`) + + return nil, diags + } + + providerNameVal, ok := providerNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`provider_name expected to be basetypes.StringValue, was: %T`, providerNameAttribute)) + } + + readOnlySpecsAttribute, ok := attributes["read_only_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `read_only_specs is missing from object`) + + return nil, diags + } + + readOnlySpecsVal, ok := readOnlySpecsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`read_only_specs expected to be basetypes.ObjectValue, was: %T`, readOnlySpecsAttribute)) + } + + regionNameAttribute, ok := attributes["region_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `region_name is missing from object`) + + return nil, diags + } + + regionNameVal, ok := regionNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`region_name expected to be basetypes.StringValue, was: %T`, regionNameAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return RegionConfigsValue{ + AnalyticsAutoScaling: analyticsAutoScalingVal, + AnalyticsSpecs: analyticsSpecsVal, + 
AutoScaling: autoScalingVal, + BackingProviderName: backingProviderNameVal, + ElectableSpecs: electableSpecsVal, + Priority: priorityVal, + ProviderName: providerNameVal, + ReadOnlySpecs: readOnlySpecsVal, + RegionName: regionNameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewRegionConfigsValueNull() RegionConfigsValue { + return RegionConfigsValue{ + state: attr.ValueStateNull, + } +} + +func NewRegionConfigsValueUnknown() RegionConfigsValue { + return RegionConfigsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewRegionConfigsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (RegionConfigsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing RegionConfigsValue Attribute Value", + "While creating a RegionConfigsValue value, a missing attribute value was detected. "+ + "A RegionConfigsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("RegionConfigsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid RegionConfigsValue Attribute Type", + "While creating a RegionConfigsValue value, an invalid attribute value was detected. "+ + "A RegionConfigsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("RegionConfigsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("RegionConfigsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra RegionConfigsValue Attribute Value", + "While creating a RegionConfigsValue value, an extra attribute value was detected. "+ + "A RegionConfigsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra RegionConfigsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewRegionConfigsValueUnknown(), diags + } + + analyticsAutoScalingAttribute, ok := attributes["analytics_auto_scaling"] + + if !ok { + diags.AddError( + "Attribute Missing", + `analytics_auto_scaling is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + analyticsAutoScalingVal, ok := analyticsAutoScalingAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`analytics_auto_scaling expected to be basetypes.ObjectValue, was: %T`, analyticsAutoScalingAttribute)) + } + + analyticsSpecsAttribute, ok := attributes["analytics_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `analytics_specs is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + analyticsSpecsVal, ok := analyticsSpecsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`analytics_specs expected to be basetypes.ObjectValue, was: %T`, analyticsSpecsAttribute)) + } + + autoScalingAttribute, ok := attributes["auto_scaling"] + + if !ok { + diags.AddError( + "Attribute Missing", + `auto_scaling is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + autoScalingVal, ok := autoScalingAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`auto_scaling expected to be basetypes.ObjectValue, was: %T`, autoScalingAttribute)) + } + + backingProviderNameAttribute, ok := attributes["backing_provider_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `backing_provider_name is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + backingProviderNameVal, ok := backingProviderNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`backing_provider_name expected to be basetypes.StringValue, was: %T`, backingProviderNameAttribute)) + } + + electableSpecsAttribute, ok := attributes["electable_specs"] + + if !ok { + diags.AddError( + "Attribute Missing", + `electable_specs is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + electableSpecsVal, ok := electableSpecsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`electable_specs expected to be basetypes.ObjectValue, was: %T`, electableSpecsAttribute)) + } + + priorityAttribute, ok := attributes["priority"] + + if !ok { + diags.AddError( + "Attribute Missing", + `priority is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + priorityVal, ok := priorityAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`priority expected to be basetypes.Int64Value, was: %T`, priorityAttribute)) + } + + providerNameAttribute, ok := attributes["provider_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `provider_name is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + providerNameVal, ok := providerNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`provider_name expected to be basetypes.StringValue, was: %T`, providerNameAttribute)) + } + + readOnlySpecsAttribute, ok := attributes["read_only_specs"] + + if !ok { + 
diags.AddError( + "Attribute Missing", + `read_only_specs is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + readOnlySpecsVal, ok := readOnlySpecsAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`read_only_specs expected to be basetypes.ObjectValue, was: %T`, readOnlySpecsAttribute)) + } + + regionNameAttribute, ok := attributes["region_name"] + + if !ok { + diags.AddError( + "Attribute Missing", + `region_name is missing from object`) + + return NewRegionConfigsValueUnknown(), diags + } + + regionNameVal, ok := regionNameAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`region_name expected to be basetypes.StringValue, was: %T`, regionNameAttribute)) + } + + if diags.HasError() { + return NewRegionConfigsValueUnknown(), diags + } + + return RegionConfigsValue{ + AnalyticsAutoScaling: analyticsAutoScalingVal, + AnalyticsSpecs: analyticsSpecsVal, + AutoScaling: autoScalingVal, + BackingProviderName: backingProviderNameVal, + ElectableSpecs: electableSpecsVal, + Priority: priorityVal, + ProviderName: providerNameVal, + ReadOnlySpecs: readOnlySpecsVal, + RegionName: regionNameVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewRegionConfigsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) RegionConfigsValue { + object, diags := NewRegionConfigsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewRegionConfigsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t RegionConfigsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewRegionConfigsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewRegionConfigsValueUnknown(), nil + } + + if in.IsNull() { + return NewRegionConfigsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewRegionConfigsValueMust(RegionConfigsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t RegionConfigsType) ValueType(ctx context.Context) attr.Value { + return RegionConfigsValue{} +} + +var _ basetypes.ObjectValuable = RegionConfigsValue{} + +type RegionConfigsValue struct { + AnalyticsAutoScaling basetypes.ObjectValue `tfsdk:"analytics_auto_scaling"` + AnalyticsSpecs basetypes.ObjectValue `tfsdk:"analytics_specs"` + AutoScaling basetypes.ObjectValue `tfsdk:"auto_scaling"` + BackingProviderName basetypes.StringValue `tfsdk:"backing_provider_name"` + ElectableSpecs basetypes.ObjectValue `tfsdk:"electable_specs"` + ProviderName basetypes.StringValue `tfsdk:"provider_name"` + ReadOnlySpecs basetypes.ObjectValue `tfsdk:"read_only_specs"` + RegionName basetypes.StringValue `tfsdk:"region_name"` + Priority basetypes.Int64Value `tfsdk:"priority"` + state 
attr.ValueState +} + +func (v RegionConfigsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 9) + + var val tftypes.Value + var err error + + attrTypes["analytics_auto_scaling"] = basetypes.ObjectType{ + AttrTypes: AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["analytics_specs"] = basetypes.ObjectType{ + AttrTypes: AnalyticsSpecsValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["auto_scaling"] = basetypes.ObjectType{ + AttrTypes: AutoScalingValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["backing_provider_name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["electable_specs"] = basetypes.ObjectType{ + AttrTypes: ElectableSpecsValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["priority"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["provider_name"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["read_only_specs"] = basetypes.ObjectType{ + AttrTypes: ReadOnlySpecsValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["region_name"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 9) + + val, err = v.AnalyticsAutoScaling.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["analytics_auto_scaling"] = val + + val, err = v.AnalyticsSpecs.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["analytics_specs"] = val + + val, err = v.AutoScaling.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["auto_scaling"] = val + + val, err = v.BackingProviderName.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["backing_provider_name"] = val + + val, err = v.ElectableSpecs.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["electable_specs"] = val + + val, err = v.Priority.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["priority"] = val + + val, err = v.ProviderName.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["provider_name"] = val + + val, err = v.ReadOnlySpecs.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["read_only_specs"] = val + + val, err = v.RegionName.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["region_name"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v RegionConfigsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v RegionConfigsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown 
+} + +func (v RegionConfigsValue) String() string { + return "RegionConfigsValue" +} + +func (v RegionConfigsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var analyticsAutoScaling basetypes.ObjectValue + + if v.AnalyticsAutoScaling.IsNull() { + analyticsAutoScaling = types.ObjectNull( + AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + ) + } + + if v.AnalyticsAutoScaling.IsUnknown() { + analyticsAutoScaling = types.ObjectUnknown( + AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + ) + } + + if !v.AnalyticsAutoScaling.IsNull() && !v.AnalyticsAutoScaling.IsUnknown() { + analyticsAutoScaling = types.ObjectValueMust( + AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + v.AnalyticsAutoScaling.Attributes(), + ) + } + + var analyticsSpecs basetypes.ObjectValue + + if v.AnalyticsSpecs.IsNull() { + analyticsSpecs = types.ObjectNull( + AnalyticsSpecsValue{}.AttributeTypes(ctx), + ) + } + + if v.AnalyticsSpecs.IsUnknown() { + analyticsSpecs = types.ObjectUnknown( + AnalyticsSpecsValue{}.AttributeTypes(ctx), + ) + } + + if !v.AnalyticsSpecs.IsNull() && !v.AnalyticsSpecs.IsUnknown() { + analyticsSpecs = types.ObjectValueMust( + AnalyticsSpecsValue{}.AttributeTypes(ctx), + v.AnalyticsSpecs.Attributes(), + ) + } + + var autoScaling basetypes.ObjectValue + + if v.AutoScaling.IsNull() { + autoScaling = types.ObjectNull( + AutoScalingValue{}.AttributeTypes(ctx), + ) + } + + if v.AutoScaling.IsUnknown() { + autoScaling = types.ObjectUnknown( + AutoScalingValue{}.AttributeTypes(ctx), + ) + } + + if !v.AutoScaling.IsNull() && !v.AutoScaling.IsUnknown() { + autoScaling = types.ObjectValueMust( + AutoScalingValue{}.AttributeTypes(ctx), + v.AutoScaling.Attributes(), + ) + } + + var electableSpecs basetypes.ObjectValue + + if v.ElectableSpecs.IsNull() { + electableSpecs = types.ObjectNull( + ElectableSpecsValue{}.AttributeTypes(ctx), + ) + } + + if v.ElectableSpecs.IsUnknown() { + electableSpecs = types.ObjectUnknown( + ElectableSpecsValue{}.AttributeTypes(ctx), + ) + } + + if !v.ElectableSpecs.IsNull() && !v.ElectableSpecs.IsUnknown() { + electableSpecs = types.ObjectValueMust( + ElectableSpecsValue{}.AttributeTypes(ctx), + v.ElectableSpecs.Attributes(), + ) + } + + var readOnlySpecs basetypes.ObjectValue + + if v.ReadOnlySpecs.IsNull() { + readOnlySpecs = types.ObjectNull( + ReadOnlySpecsValue{}.AttributeTypes(ctx), + ) + } + + if v.ReadOnlySpecs.IsUnknown() { + readOnlySpecs = types.ObjectUnknown( + ReadOnlySpecsValue{}.AttributeTypes(ctx), + ) + } + + if !v.ReadOnlySpecs.IsNull() && !v.ReadOnlySpecs.IsUnknown() { + readOnlySpecs = types.ObjectValueMust( + ReadOnlySpecsValue{}.AttributeTypes(ctx), + v.ReadOnlySpecs.Attributes(), + ) + } + + attributeTypes := map[string]attr.Type{ + "analytics_auto_scaling": basetypes.ObjectType{ + AttrTypes: AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + }, + "analytics_specs": basetypes.ObjectType{ + AttrTypes: AnalyticsSpecsValue{}.AttributeTypes(ctx), + }, + "auto_scaling": basetypes.ObjectType{ + AttrTypes: AutoScalingValue{}.AttributeTypes(ctx), + }, + "backing_provider_name": basetypes.StringType{}, + "electable_specs": basetypes.ObjectType{ + AttrTypes: ElectableSpecsValue{}.AttributeTypes(ctx), + }, + "priority": basetypes.Int64Type{}, + "provider_name": basetypes.StringType{}, + "read_only_specs": basetypes.ObjectType{ + AttrTypes: ReadOnlySpecsValue{}.AttributeTypes(ctx), + }, + "region_name": basetypes.StringType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + 
} + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "analytics_auto_scaling": analyticsAutoScaling, + "analytics_specs": analyticsSpecs, + "auto_scaling": autoScaling, + "backing_provider_name": v.BackingProviderName, + "electable_specs": electableSpecs, + "priority": v.Priority, + "provider_name": v.ProviderName, + "read_only_specs": readOnlySpecs, + "region_name": v.RegionName, + }) + + return objVal, diags +} + +func (v RegionConfigsValue) Equal(o attr.Value) bool { + other, ok := o.(RegionConfigsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.AnalyticsAutoScaling.Equal(other.AnalyticsAutoScaling) { + return false + } + + if !v.AnalyticsSpecs.Equal(other.AnalyticsSpecs) { + return false + } + + if !v.AutoScaling.Equal(other.AutoScaling) { + return false + } + + if !v.BackingProviderName.Equal(other.BackingProviderName) { + return false + } + + if !v.ElectableSpecs.Equal(other.ElectableSpecs) { + return false + } + + if !v.Priority.Equal(other.Priority) { + return false + } + + if !v.ProviderName.Equal(other.ProviderName) { + return false + } + + if !v.ReadOnlySpecs.Equal(other.ReadOnlySpecs) { + return false + } + + if !v.RegionName.Equal(other.RegionName) { + return false + } + + return true +} + +func (v RegionConfigsValue) Type(ctx context.Context) attr.Type { + return RegionConfigsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v RegionConfigsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "analytics_auto_scaling": basetypes.ObjectType{ + AttrTypes: AnalyticsAutoScalingValue{}.AttributeTypes(ctx), + }, + "analytics_specs": basetypes.ObjectType{ + AttrTypes: AnalyticsSpecsValue{}.AttributeTypes(ctx), + }, + "auto_scaling": basetypes.ObjectType{ + AttrTypes: AutoScalingValue{}.AttributeTypes(ctx), + }, + "backing_provider_name": basetypes.StringType{}, + "electable_specs": basetypes.ObjectType{ + AttrTypes: ElectableSpecsValue{}.AttributeTypes(ctx), + }, + "priority": basetypes.Int64Type{}, + "provider_name": basetypes.StringType{}, + "read_only_specs": basetypes.ObjectType{ + AttrTypes: ReadOnlySpecsValue{}.AttributeTypes(ctx), + }, + "region_name": basetypes.StringType{}, + } +} + +var _ basetypes.ObjectTypable = AnalyticsAutoScalingType{} + +type AnalyticsAutoScalingType struct { + basetypes.ObjectType +} + +func (t AnalyticsAutoScalingType) Equal(o attr.Type) bool { + other, ok := o.(AnalyticsAutoScalingType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t AnalyticsAutoScalingType) String() string { + return "AnalyticsAutoScalingType" +} + +func (t AnalyticsAutoScalingType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + computeAttribute, ok := attributes["compute"] + + if !ok { + diags.AddError( + "Attribute Missing", + `compute is missing from object`) + + return nil, diags + } + + computeVal, ok := computeAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`compute expected to be basetypes.ObjectValue, was: %T`, computeAttribute)) + } + + diskGbAttribute, ok := attributes["disk_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + 
`disk_gb is missing from object`) + + return nil, diags + } + + diskGbVal, ok := diskGbAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_gb expected to be basetypes.ObjectValue, was: %T`, diskGbAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return AnalyticsAutoScalingValue{ + Compute: computeVal, + DiskGb: diskGbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewAnalyticsAutoScalingValueNull() AnalyticsAutoScalingValue { + return AnalyticsAutoScalingValue{ + state: attr.ValueStateNull, + } +} + +func NewAnalyticsAutoScalingValueUnknown() AnalyticsAutoScalingValue { + return AnalyticsAutoScalingValue{ + state: attr.ValueStateUnknown, + } +} + +func NewAnalyticsAutoScalingValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (AnalyticsAutoScalingValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing AnalyticsAutoScalingValue Attribute Value", + "While creating a AnalyticsAutoScalingValue value, a missing attribute value was detected. "+ + "A AnalyticsAutoScalingValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("AnalyticsAutoScalingValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid AnalyticsAutoScalingValue Attribute Type", + "While creating a AnalyticsAutoScalingValue value, an invalid attribute value was detected. "+ + "A AnalyticsAutoScalingValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("AnalyticsAutoScalingValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("AnalyticsAutoScalingValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra AnalyticsAutoScalingValue Attribute Value", + "While creating a AnalyticsAutoScalingValue value, an extra attribute value was detected. "+ + "A AnalyticsAutoScalingValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra AnalyticsAutoScalingValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewAnalyticsAutoScalingValueUnknown(), diags + } + + computeAttribute, ok := attributes["compute"] + + if !ok { + diags.AddError( + "Attribute Missing", + `compute is missing from object`) + + return NewAnalyticsAutoScalingValueUnknown(), diags + } + + computeVal, ok := computeAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`compute expected to be basetypes.ObjectValue, was: %T`, computeAttribute)) + } + + diskGbAttribute, ok := attributes["disk_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_gb is missing from object`) + + return NewAnalyticsAutoScalingValueUnknown(), diags + } + + diskGbVal, ok := diskGbAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_gb expected to be basetypes.ObjectValue, was: %T`, diskGbAttribute)) + } + + if diags.HasError() { + return NewAnalyticsAutoScalingValueUnknown(), diags + } + + return AnalyticsAutoScalingValue{ + Compute: computeVal, + DiskGb: diskGbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewAnalyticsAutoScalingValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) AnalyticsAutoScalingValue { + object, diags := NewAnalyticsAutoScalingValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewAnalyticsAutoScalingValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t AnalyticsAutoScalingType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewAnalyticsAutoScalingValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewAnalyticsAutoScalingValueUnknown(), nil + } + + if in.IsNull() { + return NewAnalyticsAutoScalingValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewAnalyticsAutoScalingValueMust(AnalyticsAutoScalingValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t AnalyticsAutoScalingType) ValueType(ctx context.Context) attr.Value { + return AnalyticsAutoScalingValue{} +} + +var _ basetypes.ObjectValuable = AnalyticsAutoScalingValue{} + +type AnalyticsAutoScalingValue struct { + Compute basetypes.ObjectValue `tfsdk:"compute"` + DiskGb basetypes.ObjectValue `tfsdk:"disk_gb"` + state attr.ValueState +} + +func (v AnalyticsAutoScalingValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["compute"] = basetypes.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["disk_gb"] = 
basetypes.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Compute.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["compute"] = val + + val, err = v.DiskGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_gb"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v AnalyticsAutoScalingValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v AnalyticsAutoScalingValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v AnalyticsAutoScalingValue) String() string { + return "AnalyticsAutoScalingValue" +} + +func (v AnalyticsAutoScalingValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var compute basetypes.ObjectValue + + if v.Compute.IsNull() { + compute = types.ObjectNull( + ComputeValue{}.AttributeTypes(ctx), + ) + } + + if v.Compute.IsUnknown() { + compute = types.ObjectUnknown( + ComputeValue{}.AttributeTypes(ctx), + ) + } + + if !v.Compute.IsNull() && !v.Compute.IsUnknown() { + compute = types.ObjectValueMust( + ComputeValue{}.AttributeTypes(ctx), + v.Compute.Attributes(), + ) + } + + var diskGb basetypes.ObjectValue + + if v.DiskGb.IsNull() { + diskGb = types.ObjectNull( + DiskGbValue{}.AttributeTypes(ctx), + ) + } + + if v.DiskGb.IsUnknown() { + diskGb = types.ObjectUnknown( + DiskGbValue{}.AttributeTypes(ctx), + ) + } + + if !v.DiskGb.IsNull() && !v.DiskGb.IsUnknown() { + diskGb = types.ObjectValueMust( + DiskGbValue{}.AttributeTypes(ctx), + v.DiskGb.Attributes(), + ) + } + + attributeTypes := map[string]attr.Type{ + "compute": basetypes.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + "disk_gb": basetypes.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "compute": compute, + "disk_gb": diskGb, + }) + + return objVal, diags +} + +func (v AnalyticsAutoScalingValue) Equal(o attr.Value) bool { + other, ok := o.(AnalyticsAutoScalingValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Compute.Equal(other.Compute) { + return false + } + + if !v.DiskGb.Equal(other.DiskGb) { + return false + } + + return true +} + +func (v AnalyticsAutoScalingValue) Type(ctx context.Context) attr.Type { + return AnalyticsAutoScalingType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v AnalyticsAutoScalingValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "compute": 
basetypes.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + "disk_gb": basetypes.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = ComputeType{} + +type ComputeType struct { + basetypes.ObjectType +} + +func (t ComputeType) Equal(o attr.Type) bool { + other, ok := o.(ComputeType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ComputeType) String() string { + return "ComputeType" +} + +func (t ComputeType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return nil, diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + maxInstanceSizeAttribute, ok := attributes["max_instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_instance_size is missing from object`) + + return nil, diags + } + + maxInstanceSizeVal, ok := maxInstanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_instance_size expected to be basetypes.StringValue, was: %T`, maxInstanceSizeAttribute)) + } + + minInstanceSizeAttribute, ok := attributes["min_instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `min_instance_size is missing from object`) + + return nil, diags + } + + minInstanceSizeVal, ok := minInstanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`min_instance_size expected to be basetypes.StringValue, was: %T`, minInstanceSizeAttribute)) + } + + scaleDownEnabledAttribute, ok := attributes["scale_down_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `scale_down_enabled is missing from object`) + + return nil, diags + } + + scaleDownEnabledVal, ok := scaleDownEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`scale_down_enabled expected to be basetypes.BoolValue, was: %T`, scaleDownEnabledAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ComputeValue{ + Enabled: enabledVal, + MaxInstanceSize: maxInstanceSizeVal, + MinInstanceSize: minInstanceSizeVal, + ScaleDownEnabled: scaleDownEnabledVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewComputeValueNull() ComputeValue { + return ComputeValue{ + state: attr.ValueStateNull, + } +} + +func NewComputeValueUnknown() ComputeValue { + return ComputeValue{ + state: attr.ValueStateUnknown, + } +} + +func NewComputeValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ComputeValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ComputeValue Attribute Value", + "While creating a ComputeValue value, a missing attribute value was detected. "+ + "A ComputeValue must contain values for all attributes, even if null or unknown. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ComputeValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ComputeValue Attribute Type", + "While creating a ComputeValue value, an invalid attribute value was detected. "+ + "A ComputeValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ComputeValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ComputeValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ComputeValue Attribute Value", + "While creating a ComputeValue value, an extra attribute value was detected. "+ + "A ComputeValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ComputeValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewComputeValueUnknown(), diags + } + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return NewComputeValueUnknown(), diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + maxInstanceSizeAttribute, ok := attributes["max_instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `max_instance_size is missing from object`) + + return NewComputeValueUnknown(), diags + } + + maxInstanceSizeVal, ok := maxInstanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`max_instance_size expected to be basetypes.StringValue, was: %T`, maxInstanceSizeAttribute)) + } + + minInstanceSizeAttribute, ok := attributes["min_instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `min_instance_size is missing from object`) + + return NewComputeValueUnknown(), diags + } + + minInstanceSizeVal, ok := minInstanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`min_instance_size expected to be basetypes.StringValue, was: %T`, minInstanceSizeAttribute)) + } + + scaleDownEnabledAttribute, ok := attributes["scale_down_enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `scale_down_enabled is missing from object`) + + return NewComputeValueUnknown(), diags + } + + scaleDownEnabledVal, ok := scaleDownEnabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`scale_down_enabled expected to be basetypes.BoolValue, was: %T`, scaleDownEnabledAttribute)) + } + + if diags.HasError() { + return NewComputeValueUnknown(), diags + } + + return ComputeValue{ + Enabled: enabledVal, + MaxInstanceSize: maxInstanceSizeVal, + MinInstanceSize: minInstanceSizeVal, + ScaleDownEnabled: scaleDownEnabledVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewComputeValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ComputeValue { + object, diags := 
NewComputeValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewComputeValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ComputeType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewComputeValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewComputeValueUnknown(), nil + } + + if in.IsNull() { + return NewComputeValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewComputeValueMust(ComputeValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ComputeType) ValueType(ctx context.Context) attr.Value { + return ComputeValue{} +} + +var _ basetypes.ObjectValuable = ComputeValue{} + +type ComputeValue struct { + MaxInstanceSize basetypes.StringValue `tfsdk:"max_instance_size"` + MinInstanceSize basetypes.StringValue `tfsdk:"min_instance_size"` + Enabled basetypes.BoolValue `tfsdk:"enabled"` + ScaleDownEnabled basetypes.BoolValue `tfsdk:"scale_down_enabled"` + state attr.ValueState +} + +func (v ComputeValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 4) + + var val tftypes.Value + var err error + + attrTypes["enabled"] = basetypes.BoolType{}.TerraformType(ctx) + attrTypes["max_instance_size"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["min_instance_size"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["scale_down_enabled"] = basetypes.BoolType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 4) + + val, err = v.Enabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["enabled"] = val + + val, err = v.MaxInstanceSize.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["max_instance_size"] = val + + val, err = v.MinInstanceSize.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["min_instance_size"] = val + + val, err = v.ScaleDownEnabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["scale_down_enabled"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ComputeValue) 
IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ComputeValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ComputeValue) String() string { + return "ComputeValue" +} + +func (v ComputeValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + "max_instance_size": basetypes.StringType{}, + "min_instance_size": basetypes.StringType{}, + "scale_down_enabled": basetypes.BoolType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "enabled": v.Enabled, + "max_instance_size": v.MaxInstanceSize, + "min_instance_size": v.MinInstanceSize, + "scale_down_enabled": v.ScaleDownEnabled, + }) + + return objVal, diags +} + +func (v ComputeValue) Equal(o attr.Value) bool { + other, ok := o.(ComputeValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Enabled.Equal(other.Enabled) { + return false + } + + if !v.MaxInstanceSize.Equal(other.MaxInstanceSize) { + return false + } + + if !v.MinInstanceSize.Equal(other.MinInstanceSize) { + return false + } + + if !v.ScaleDownEnabled.Equal(other.ScaleDownEnabled) { + return false + } + + return true +} + +func (v ComputeValue) Type(ctx context.Context) attr.Type { + return ComputeType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ComputeValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + "max_instance_size": basetypes.StringType{}, + "min_instance_size": basetypes.StringType{}, + "scale_down_enabled": basetypes.BoolType{}, + } +} + +var _ basetypes.ObjectTypable = DiskGbType{} + +type DiskGbType struct { + basetypes.ObjectType +} + +func (t DiskGbType) Equal(o attr.Type) bool { + other, ok := o.(DiskGbType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t DiskGbType) String() string { + return "DiskGbType" +} + +func (t DiskGbType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return nil, diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return DiskGbValue{ + Enabled: enabledVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewDiskGbValueNull() DiskGbValue { + return DiskGbValue{ + state: attr.ValueStateNull, + } +} + +func NewDiskGbValueUnknown() DiskGbValue { + return DiskGbValue{ + state: attr.ValueStateUnknown, + } +} + +func NewDiskGbValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (DiskGbValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range 
attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing DiskGbValue Attribute Value", + "While creating a DiskGbValue value, a missing attribute value was detected. "+ + "A DiskGbValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("DiskGbValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid DiskGbValue Attribute Type", + "While creating a DiskGbValue value, an invalid attribute value was detected. "+ + "A DiskGbValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("DiskGbValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("DiskGbValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra DiskGbValue Attribute Value", + "While creating a DiskGbValue value, an extra attribute value was detected. "+ + "A DiskGbValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra DiskGbValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewDiskGbValueUnknown(), diags + } + + enabledAttribute, ok := attributes["enabled"] + + if !ok { + diags.AddError( + "Attribute Missing", + `enabled is missing from object`) + + return NewDiskGbValueUnknown(), diags + } + + enabledVal, ok := enabledAttribute.(basetypes.BoolValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`enabled expected to be basetypes.BoolValue, was: %T`, enabledAttribute)) + } + + if diags.HasError() { + return NewDiskGbValueUnknown(), diags + } + + return DiskGbValue{ + Enabled: enabledVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewDiskGbValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) DiskGbValue { + object, diags := NewDiskGbValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. 
+ diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewDiskGbValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t DiskGbType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewDiskGbValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewDiskGbValueUnknown(), nil + } + + if in.IsNull() { + return NewDiskGbValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewDiskGbValueMust(DiskGbValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t DiskGbType) ValueType(ctx context.Context) attr.Value { + return DiskGbValue{} +} + +var _ basetypes.ObjectValuable = DiskGbValue{} + +type DiskGbValue struct { + Enabled basetypes.BoolValue `tfsdk:"enabled"` + state attr.ValueState +} + +func (v DiskGbValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 1) + + var val tftypes.Value + var err error + + attrTypes["enabled"] = basetypes.BoolType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 1) + + val, err = v.Enabled.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["enabled"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v DiskGbValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v DiskGbValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v DiskGbValue) String() string { + return "DiskGbValue" +} + +func (v DiskGbValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "enabled": v.Enabled, + }) + + return objVal, diags +} + +func (v DiskGbValue) Equal(o attr.Value) bool { + other, ok := o.(DiskGbValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Enabled.Equal(other.Enabled) { + return false + } + + return true +} + +func (v DiskGbValue) Type(ctx context.Context) attr.Type { + return DiskGbType{ + 
basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v DiskGbValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "enabled": basetypes.BoolType{}, + } +} + +var _ basetypes.ObjectTypable = AnalyticsSpecsType{} + +type AnalyticsSpecsType struct { + basetypes.ObjectType +} + +func (t AnalyticsSpecsType) Equal(o attr.Type) bool { + other, ok := o.(AnalyticsSpecsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t AnalyticsSpecsType) String() string { + return "AnalyticsSpecsType" +} + +func (t AnalyticsSpecsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + diskIopsAttribute, ok := attributes["disk_iops"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_iops is missing from object`) + + return nil, diags + } + + diskIopsVal, ok := diskIopsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_iops expected to be basetypes.Int64Value, was: %T`, diskIopsAttribute)) + } + + diskSizeGbAttribute, ok := attributes["disk_size_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_size_gb is missing from object`) + + return nil, diags + } + + diskSizeGbVal, ok := diskSizeGbAttribute.(basetypes.Float64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_size_gb expected to be basetypes.Float64Value, was: %T`, diskSizeGbAttribute)) + } + + ebsVolumeTypeAttribute, ok := attributes["ebs_volume_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ebs_volume_type is missing from object`) + + return nil, diags + } + + ebsVolumeTypeVal, ok := ebsVolumeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ebs_volume_type expected to be basetypes.StringValue, was: %T`, ebsVolumeTypeAttribute)) + } + + instanceSizeAttribute, ok := attributes["instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `instance_size is missing from object`) + + return nil, diags + } + + instanceSizeVal, ok := instanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`instance_size expected to be basetypes.StringValue, was: %T`, instanceSizeAttribute)) + } + + nodeCountAttribute, ok := attributes["node_count"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_count is missing from object`) + + return nil, diags + } + + nodeCountVal, ok := nodeCountAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_count expected to be basetypes.Int64Value, was: %T`, nodeCountAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return AnalyticsSpecsValue{ + DiskIops: diskIopsVal, + DiskSizeGb: diskSizeGbVal, + EbsVolumeType: ebsVolumeTypeVal, + InstanceSize: instanceSizeVal, + NodeCount: nodeCountVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewAnalyticsSpecsValueNull() AnalyticsSpecsValue { + return AnalyticsSpecsValue{ + state: attr.ValueStateNull, + } +} + +func NewAnalyticsSpecsValueUnknown() AnalyticsSpecsValue { + return AnalyticsSpecsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewAnalyticsSpecsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (AnalyticsSpecsValue, diag.Diagnostics) { + var diags diag.Diagnostics + 
+ // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing AnalyticsSpecsValue Attribute Value", + "While creating a AnalyticsSpecsValue value, a missing attribute value was detected. "+ + "A AnalyticsSpecsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("AnalyticsSpecsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid AnalyticsSpecsValue Attribute Type", + "While creating a AnalyticsSpecsValue value, an invalid attribute value was detected. "+ + "A AnalyticsSpecsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("AnalyticsSpecsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("AnalyticsSpecsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra AnalyticsSpecsValue Attribute Value", + "While creating a AnalyticsSpecsValue value, an extra attribute value was detected. "+ + "A AnalyticsSpecsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra AnalyticsSpecsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewAnalyticsSpecsValueUnknown(), diags + } + + diskIopsAttribute, ok := attributes["disk_iops"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_iops is missing from object`) + + return NewAnalyticsSpecsValueUnknown(), diags + } + + diskIopsVal, ok := diskIopsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_iops expected to be basetypes.Int64Value, was: %T`, diskIopsAttribute)) + } + + diskSizeGbAttribute, ok := attributes["disk_size_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_size_gb is missing from object`) + + return NewAnalyticsSpecsValueUnknown(), diags + } + + diskSizeGbVal, ok := diskSizeGbAttribute.(basetypes.Float64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_size_gb expected to be basetypes.Float64Value, was: %T`, diskSizeGbAttribute)) + } + + ebsVolumeTypeAttribute, ok := attributes["ebs_volume_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ebs_volume_type is missing from object`) + + return NewAnalyticsSpecsValueUnknown(), diags + } + + ebsVolumeTypeVal, ok := ebsVolumeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ebs_volume_type expected to be basetypes.StringValue, was: %T`, ebsVolumeTypeAttribute)) + } + + instanceSizeAttribute, ok := attributes["instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `instance_size is missing from object`) + + return NewAnalyticsSpecsValueUnknown(), diags + } + + instanceSizeVal, ok := instanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + 
fmt.Sprintf(`instance_size expected to be basetypes.StringValue, was: %T`, instanceSizeAttribute)) + } + + nodeCountAttribute, ok := attributes["node_count"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_count is missing from object`) + + return NewAnalyticsSpecsValueUnknown(), diags + } + + nodeCountVal, ok := nodeCountAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_count expected to be basetypes.Int64Value, was: %T`, nodeCountAttribute)) + } + + if diags.HasError() { + return NewAnalyticsSpecsValueUnknown(), diags + } + + return AnalyticsSpecsValue{ + DiskIops: diskIopsVal, + DiskSizeGb: diskSizeGbVal, + EbsVolumeType: ebsVolumeTypeVal, + InstanceSize: instanceSizeVal, + NodeCount: nodeCountVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewAnalyticsSpecsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) AnalyticsSpecsValue { + object, diags := NewAnalyticsSpecsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewAnalyticsSpecsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t AnalyticsSpecsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewAnalyticsSpecsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewAnalyticsSpecsValueUnknown(), nil + } + + if in.IsNull() { + return NewAnalyticsSpecsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewAnalyticsSpecsValueMust(AnalyticsSpecsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t AnalyticsSpecsType) ValueType(ctx context.Context) attr.Value { + return AnalyticsSpecsValue{} +} + +var _ basetypes.ObjectValuable = AnalyticsSpecsValue{} + +type AnalyticsSpecsValue struct { + DiskSizeGb basetypes.Float64Value `tfsdk:"disk_size_gb"` + EbsVolumeType basetypes.StringValue `tfsdk:"ebs_volume_type"` + InstanceSize basetypes.StringValue `tfsdk:"instance_size"` + DiskIops basetypes.Int64Value `tfsdk:"disk_iops"` + NodeCount basetypes.Int64Value `tfsdk:"node_count"` + state attr.ValueState +} + +func (v AnalyticsSpecsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["disk_iops"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["disk_size_gb"] = basetypes.Float64Type{}.TerraformType(ctx) + attrTypes["ebs_volume_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["instance_size"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["node_count"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = 
v.DiskIops.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_iops"] = val + + val, err = v.DiskSizeGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_size_gb"] = val + + val, err = v.EbsVolumeType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["ebs_volume_type"] = val + + val, err = v.InstanceSize.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["instance_size"] = val + + val, err = v.NodeCount.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_count"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v AnalyticsSpecsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v AnalyticsSpecsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v AnalyticsSpecsValue) String() string { + return "AnalyticsSpecsValue" +} + +func (v AnalyticsSpecsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "disk_iops": basetypes.Int64Type{}, + "disk_size_gb": basetypes.Float64Type{}, + "ebs_volume_type": basetypes.StringType{}, + "instance_size": basetypes.StringType{}, + "node_count": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "disk_iops": v.DiskIops, + "disk_size_gb": v.DiskSizeGb, + "ebs_volume_type": v.EbsVolumeType, + "instance_size": v.InstanceSize, + "node_count": v.NodeCount, + }) + + return objVal, diags +} + +func (v AnalyticsSpecsValue) Equal(o attr.Value) bool { + other, ok := o.(AnalyticsSpecsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.DiskIops.Equal(other.DiskIops) { + return false + } + + if !v.DiskSizeGb.Equal(other.DiskSizeGb) { + return false + } + + if !v.EbsVolumeType.Equal(other.EbsVolumeType) { + return false + } + + if !v.InstanceSize.Equal(other.InstanceSize) { + return false + } + + if !v.NodeCount.Equal(other.NodeCount) { + return false + } + + return true +} + +func (v AnalyticsSpecsValue) Type(ctx context.Context) attr.Type { + return AnalyticsSpecsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v AnalyticsSpecsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "disk_iops": basetypes.Int64Type{}, + "disk_size_gb": basetypes.Float64Type{}, + "ebs_volume_type": basetypes.StringType{}, + "instance_size": basetypes.StringType{}, + "node_count": basetypes.Int64Type{}, + } +} + +var _ basetypes.ObjectTypable = AutoScalingType{} + +type 
AutoScalingType struct { + basetypes.ObjectType +} + +func (t AutoScalingType) Equal(o attr.Type) bool { + other, ok := o.(AutoScalingType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t AutoScalingType) String() string { + return "AutoScalingType" +} + +func (t AutoScalingType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + computeAttribute, ok := attributes["compute"] + + if !ok { + diags.AddError( + "Attribute Missing", + `compute is missing from object`) + + return nil, diags + } + + computeVal, ok := computeAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`compute expected to be basetypes.ObjectValue, was: %T`, computeAttribute)) + } + + diskGbAttribute, ok := attributes["disk_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_gb is missing from object`) + + return nil, diags + } + + diskGbVal, ok := diskGbAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_gb expected to be basetypes.ObjectValue, was: %T`, diskGbAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return AutoScalingValue{ + Compute: computeVal, + DiskGb: diskGbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewAutoScalingValueNull() AutoScalingValue { + return AutoScalingValue{ + state: attr.ValueStateNull, + } +} + +func NewAutoScalingValueUnknown() AutoScalingValue { + return AutoScalingValue{ + state: attr.ValueStateUnknown, + } +} + +func NewAutoScalingValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (AutoScalingValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing AutoScalingValue Attribute Value", + "While creating a AutoScalingValue value, a missing attribute value was detected. "+ + "A AutoScalingValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("AutoScalingValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid AutoScalingValue Attribute Type", + "While creating a AutoScalingValue value, an invalid attribute value was detected. "+ + "A AutoScalingValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("AutoScalingValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("AutoScalingValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra AutoScalingValue Attribute Value", + "While creating a AutoScalingValue value, an extra attribute value was detected. "+ + "A AutoScalingValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra AutoScalingValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewAutoScalingValueUnknown(), diags + } + + computeAttribute, ok := attributes["compute"] + + if !ok { + diags.AddError( + "Attribute Missing", + `compute is missing from object`) + + return NewAutoScalingValueUnknown(), diags + } + + computeVal, ok := computeAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`compute expected to be basetypes.ObjectValue, was: %T`, computeAttribute)) + } + + diskGbAttribute, ok := attributes["disk_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_gb is missing from object`) + + return NewAutoScalingValueUnknown(), diags + } + + diskGbVal, ok := diskGbAttribute.(basetypes.ObjectValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_gb expected to be basetypes.ObjectValue, was: %T`, diskGbAttribute)) + } + + if diags.HasError() { + return NewAutoScalingValueUnknown(), diags + } + + return AutoScalingValue{ + Compute: computeVal, + DiskGb: diskGbVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewAutoScalingValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) AutoScalingValue { + object, diags := NewAutoScalingValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewAutoScalingValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t AutoScalingType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewAutoScalingValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewAutoScalingValueUnknown(), nil + } + + if in.IsNull() { + return NewAutoScalingValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewAutoScalingValueMust(AutoScalingValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t AutoScalingType) ValueType(ctx context.Context) attr.Value { + return AutoScalingValue{} +} + +var _ basetypes.ObjectValuable = AutoScalingValue{} + +type AutoScalingValue struct { + Compute basetypes.ObjectValue `tfsdk:"compute"` + DiskGb basetypes.ObjectValue `tfsdk:"disk_gb"` + state attr.ValueState +} + +func (v AutoScalingValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["compute"] = basetypes.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + attrTypes["disk_gb"] = basetypes.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case 
attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Compute.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["compute"] = val + + val, err = v.DiskGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_gb"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v AutoScalingValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v AutoScalingValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v AutoScalingValue) String() string { + return "AutoScalingValue" +} + +func (v AutoScalingValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + var compute basetypes.ObjectValue + + if v.Compute.IsNull() { + compute = types.ObjectNull( + ComputeValue{}.AttributeTypes(ctx), + ) + } + + if v.Compute.IsUnknown() { + compute = types.ObjectUnknown( + ComputeValue{}.AttributeTypes(ctx), + ) + } + + if !v.Compute.IsNull() && !v.Compute.IsUnknown() { + compute = types.ObjectValueMust( + ComputeValue{}.AttributeTypes(ctx), + v.Compute.Attributes(), + ) + } + + var diskGb basetypes.ObjectValue + + if v.DiskGb.IsNull() { + diskGb = types.ObjectNull( + DiskGbValue{}.AttributeTypes(ctx), + ) + } + + if v.DiskGb.IsUnknown() { + diskGb = types.ObjectUnknown( + DiskGbValue{}.AttributeTypes(ctx), + ) + } + + if !v.DiskGb.IsNull() && !v.DiskGb.IsUnknown() { + diskGb = types.ObjectValueMust( + DiskGbValue{}.AttributeTypes(ctx), + v.DiskGb.Attributes(), + ) + } + + attributeTypes := map[string]attr.Type{ + "compute": basetypes.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + "disk_gb": basetypes.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "compute": compute, + "disk_gb": diskGb, + }) + + return objVal, diags +} + +func (v AutoScalingValue) Equal(o attr.Value) bool { + other, ok := o.(AutoScalingValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.Compute.Equal(other.Compute) { + return false + } + + if !v.DiskGb.Equal(other.DiskGb) { + return false + } + + return true +} + +func (v AutoScalingValue) Type(ctx context.Context) attr.Type { + return AutoScalingType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v AutoScalingValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "compute": basetypes.ObjectType{ + AttrTypes: ComputeValue{}.AttributeTypes(ctx), + }, + "disk_gb": basetypes.ObjectType{ + AttrTypes: DiskGbValue{}.AttributeTypes(ctx), + }, + } +} + +var _ basetypes.ObjectTypable = ElectableSpecsType{} + +type ElectableSpecsType struct { + basetypes.ObjectType +} + 
+func (t ElectableSpecsType) Equal(o attr.Type) bool { + other, ok := o.(ElectableSpecsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ElectableSpecsType) String() string { + return "ElectableSpecsType" +} + +func (t ElectableSpecsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + diskIopsAttribute, ok := attributes["disk_iops"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_iops is missing from object`) + + return nil, diags + } + + diskIopsVal, ok := diskIopsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_iops expected to be basetypes.Int64Value, was: %T`, diskIopsAttribute)) + } + + diskSizeGbAttribute, ok := attributes["disk_size_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_size_gb is missing from object`) + + return nil, diags + } + + diskSizeGbVal, ok := diskSizeGbAttribute.(basetypes.Float64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_size_gb expected to be basetypes.Float64Value, was: %T`, diskSizeGbAttribute)) + } + + ebsVolumeTypeAttribute, ok := attributes["ebs_volume_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ebs_volume_type is missing from object`) + + return nil, diags + } + + ebsVolumeTypeVal, ok := ebsVolumeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ebs_volume_type expected to be basetypes.StringValue, was: %T`, ebsVolumeTypeAttribute)) + } + + instanceSizeAttribute, ok := attributes["instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `instance_size is missing from object`) + + return nil, diags + } + + instanceSizeVal, ok := instanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`instance_size expected to be basetypes.StringValue, was: %T`, instanceSizeAttribute)) + } + + nodeCountAttribute, ok := attributes["node_count"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_count is missing from object`) + + return nil, diags + } + + nodeCountVal, ok := nodeCountAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_count expected to be basetypes.Int64Value, was: %T`, nodeCountAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ElectableSpecsValue{ + DiskIops: diskIopsVal, + DiskSizeGb: diskSizeGbVal, + EbsVolumeType: ebsVolumeTypeVal, + InstanceSize: instanceSizeVal, + NodeCount: nodeCountVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewElectableSpecsValueNull() ElectableSpecsValue { + return ElectableSpecsValue{ + state: attr.ValueStateNull, + } +} + +func NewElectableSpecsValueUnknown() ElectableSpecsValue { + return ElectableSpecsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewElectableSpecsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ElectableSpecsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ElectableSpecsValue Attribute Value", + "While creating a ElectableSpecsValue value, a missing 
attribute value was detected. "+ + "A ElectableSpecsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ElectableSpecsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ElectableSpecsValue Attribute Type", + "While creating a ElectableSpecsValue value, an invalid attribute value was detected. "+ + "A ElectableSpecsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ElectableSpecsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ElectableSpecsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ElectableSpecsValue Attribute Value", + "While creating a ElectableSpecsValue value, an extra attribute value was detected. "+ + "A ElectableSpecsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ElectableSpecsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewElectableSpecsValueUnknown(), diags + } + + diskIopsAttribute, ok := attributes["disk_iops"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_iops is missing from object`) + + return NewElectableSpecsValueUnknown(), diags + } + + diskIopsVal, ok := diskIopsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_iops expected to be basetypes.Int64Value, was: %T`, diskIopsAttribute)) + } + + diskSizeGbAttribute, ok := attributes["disk_size_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_size_gb is missing from object`) + + return NewElectableSpecsValueUnknown(), diags + } + + diskSizeGbVal, ok := diskSizeGbAttribute.(basetypes.Float64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_size_gb expected to be basetypes.Float64Value, was: %T`, diskSizeGbAttribute)) + } + + ebsVolumeTypeAttribute, ok := attributes["ebs_volume_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ebs_volume_type is missing from object`) + + return NewElectableSpecsValueUnknown(), diags + } + + ebsVolumeTypeVal, ok := ebsVolumeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ebs_volume_type expected to be basetypes.StringValue, was: %T`, ebsVolumeTypeAttribute)) + } + + instanceSizeAttribute, ok := attributes["instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `instance_size is missing from object`) + + return NewElectableSpecsValueUnknown(), diags + } + + instanceSizeVal, ok := instanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`instance_size expected to be basetypes.StringValue, was: %T`, instanceSizeAttribute)) + } + + nodeCountAttribute, ok := attributes["node_count"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_count is missing from object`) + + return NewElectableSpecsValueUnknown(), diags + } + + nodeCountVal, ok := 
nodeCountAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_count expected to be basetypes.Int64Value, was: %T`, nodeCountAttribute)) + } + + if diags.HasError() { + return NewElectableSpecsValueUnknown(), diags + } + + return ElectableSpecsValue{ + DiskIops: diskIopsVal, + DiskSizeGb: diskSizeGbVal, + EbsVolumeType: ebsVolumeTypeVal, + InstanceSize: instanceSizeVal, + NodeCount: nodeCountVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewElectableSpecsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ElectableSpecsValue { + object, diags := NewElectableSpecsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewElectableSpecsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ElectableSpecsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewElectableSpecsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewElectableSpecsValueUnknown(), nil + } + + if in.IsNull() { + return NewElectableSpecsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewElectableSpecsValueMust(ElectableSpecsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ElectableSpecsType) ValueType(ctx context.Context) attr.Value { + return ElectableSpecsValue{} +} + +var _ basetypes.ObjectValuable = ElectableSpecsValue{} + +type ElectableSpecsValue struct { + DiskSizeGb basetypes.Float64Value `tfsdk:"disk_size_gb"` + EbsVolumeType basetypes.StringValue `tfsdk:"ebs_volume_type"` + InstanceSize basetypes.StringValue `tfsdk:"instance_size"` + DiskIops basetypes.Int64Value `tfsdk:"disk_iops"` + NodeCount basetypes.Int64Value `tfsdk:"node_count"` + state attr.ValueState +} + +func (v ElectableSpecsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["disk_iops"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["disk_size_gb"] = basetypes.Float64Type{}.TerraformType(ctx) + attrTypes["ebs_volume_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["instance_size"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["node_count"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.DiskIops.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_iops"] = val + + val, err = v.DiskSizeGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_size_gb"] = val + + val, 
err = v.EbsVolumeType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["ebs_volume_type"] = val + + val, err = v.InstanceSize.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["instance_size"] = val + + val, err = v.NodeCount.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_count"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ElectableSpecsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ElectableSpecsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ElectableSpecsValue) String() string { + return "ElectableSpecsValue" +} + +func (v ElectableSpecsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "disk_iops": basetypes.Int64Type{}, + "disk_size_gb": basetypes.Float64Type{}, + "ebs_volume_type": basetypes.StringType{}, + "instance_size": basetypes.StringType{}, + "node_count": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "disk_iops": v.DiskIops, + "disk_size_gb": v.DiskSizeGb, + "ebs_volume_type": v.EbsVolumeType, + "instance_size": v.InstanceSize, + "node_count": v.NodeCount, + }) + + return objVal, diags +} + +func (v ElectableSpecsValue) Equal(o attr.Value) bool { + other, ok := o.(ElectableSpecsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.DiskIops.Equal(other.DiskIops) { + return false + } + + if !v.DiskSizeGb.Equal(other.DiskSizeGb) { + return false + } + + if !v.EbsVolumeType.Equal(other.EbsVolumeType) { + return false + } + + if !v.InstanceSize.Equal(other.InstanceSize) { + return false + } + + if !v.NodeCount.Equal(other.NodeCount) { + return false + } + + return true +} + +func (v ElectableSpecsValue) Type(ctx context.Context) attr.Type { + return ElectableSpecsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ElectableSpecsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "disk_iops": basetypes.Int64Type{}, + "disk_size_gb": basetypes.Float64Type{}, + "ebs_volume_type": basetypes.StringType{}, + "instance_size": basetypes.StringType{}, + "node_count": basetypes.Int64Type{}, + } +} + +var _ basetypes.ObjectTypable = ReadOnlySpecsType{} + +type ReadOnlySpecsType struct { + basetypes.ObjectType +} + +func (t ReadOnlySpecsType) Equal(o attr.Type) bool { + other, ok := o.(ReadOnlySpecsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t ReadOnlySpecsType) String() string { + return "ReadOnlySpecsType" +} + +func (t 
ReadOnlySpecsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + diskIopsAttribute, ok := attributes["disk_iops"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_iops is missing from object`) + + return nil, diags + } + + diskIopsVal, ok := diskIopsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_iops expected to be basetypes.Int64Value, was: %T`, diskIopsAttribute)) + } + + diskSizeGbAttribute, ok := attributes["disk_size_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_size_gb is missing from object`) + + return nil, diags + } + + diskSizeGbVal, ok := diskSizeGbAttribute.(basetypes.Float64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_size_gb expected to be basetypes.Float64Value, was: %T`, diskSizeGbAttribute)) + } + + ebsVolumeTypeAttribute, ok := attributes["ebs_volume_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ebs_volume_type is missing from object`) + + return nil, diags + } + + ebsVolumeTypeVal, ok := ebsVolumeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ebs_volume_type expected to be basetypes.StringValue, was: %T`, ebsVolumeTypeAttribute)) + } + + instanceSizeAttribute, ok := attributes["instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `instance_size is missing from object`) + + return nil, diags + } + + instanceSizeVal, ok := instanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`instance_size expected to be basetypes.StringValue, was: %T`, instanceSizeAttribute)) + } + + nodeCountAttribute, ok := attributes["node_count"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_count is missing from object`) + + return nil, diags + } + + nodeCountVal, ok := nodeCountAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_count expected to be basetypes.Int64Value, was: %T`, nodeCountAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return ReadOnlySpecsValue{ + DiskIops: diskIopsVal, + DiskSizeGb: diskSizeGbVal, + EbsVolumeType: ebsVolumeTypeVal, + InstanceSize: instanceSizeVal, + NodeCount: nodeCountVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewReadOnlySpecsValueNull() ReadOnlySpecsValue { + return ReadOnlySpecsValue{ + state: attr.ValueStateNull, + } +} + +func NewReadOnlySpecsValueUnknown() ReadOnlySpecsValue { + return ReadOnlySpecsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewReadOnlySpecsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (ReadOnlySpecsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing ReadOnlySpecsValue Attribute Value", + "While creating a ReadOnlySpecsValue value, a missing attribute value was detected. "+ + "A ReadOnlySpecsValue must contain values for all attributes, even if null or unknown. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ReadOnlySpecsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid ReadOnlySpecsValue Attribute Type", + "While creating a ReadOnlySpecsValue value, an invalid attribute value was detected. "+ + "A ReadOnlySpecsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("ReadOnlySpecsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("ReadOnlySpecsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra ReadOnlySpecsValue Attribute Value", + "While creating a ReadOnlySpecsValue value, an extra attribute value was detected. "+ + "A ReadOnlySpecsValue must not contain values beyond the expected attribute types. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra ReadOnlySpecsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewReadOnlySpecsValueUnknown(), diags + } + + diskIopsAttribute, ok := attributes["disk_iops"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_iops is missing from object`) + + return NewReadOnlySpecsValueUnknown(), diags + } + + diskIopsVal, ok := diskIopsAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_iops expected to be basetypes.Int64Value, was: %T`, diskIopsAttribute)) + } + + diskSizeGbAttribute, ok := attributes["disk_size_gb"] + + if !ok { + diags.AddError( + "Attribute Missing", + `disk_size_gb is missing from object`) + + return NewReadOnlySpecsValueUnknown(), diags + } + + diskSizeGbVal, ok := diskSizeGbAttribute.(basetypes.Float64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`disk_size_gb expected to be basetypes.Float64Value, was: %T`, diskSizeGbAttribute)) + } + + ebsVolumeTypeAttribute, ok := attributes["ebs_volume_type"] + + if !ok { + diags.AddError( + "Attribute Missing", + `ebs_volume_type is missing from object`) + + return NewReadOnlySpecsValueUnknown(), diags + } + + ebsVolumeTypeVal, ok := ebsVolumeTypeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`ebs_volume_type expected to be basetypes.StringValue, was: %T`, ebsVolumeTypeAttribute)) + } + + instanceSizeAttribute, ok := attributes["instance_size"] + + if !ok { + diags.AddError( + "Attribute Missing", + `instance_size is missing from object`) + + return NewReadOnlySpecsValueUnknown(), diags + } + + instanceSizeVal, ok := instanceSizeAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`instance_size expected to be basetypes.StringValue, was: %T`, instanceSizeAttribute)) + } + + nodeCountAttribute, ok := attributes["node_count"] + + if !ok { + diags.AddError( + "Attribute Missing", + `node_count is missing from object`) + + return NewReadOnlySpecsValueUnknown(), diags + } + + nodeCountVal, ok := nodeCountAttribute.(basetypes.Int64Value) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`node_count expected to be basetypes.Int64Value, 
was: %T`, nodeCountAttribute)) + } + + if diags.HasError() { + return NewReadOnlySpecsValueUnknown(), diags + } + + return ReadOnlySpecsValue{ + DiskIops: diskIopsVal, + DiskSizeGb: diskSizeGbVal, + EbsVolumeType: ebsVolumeTypeVal, + InstanceSize: instanceSizeVal, + NodeCount: nodeCountVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewReadOnlySpecsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) ReadOnlySpecsValue { + object, diags := NewReadOnlySpecsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewReadOnlySpecsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t ReadOnlySpecsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewReadOnlySpecsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewReadOnlySpecsValueUnknown(), nil + } + + if in.IsNull() { + return NewReadOnlySpecsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewReadOnlySpecsValueMust(ReadOnlySpecsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t ReadOnlySpecsType) ValueType(ctx context.Context) attr.Value { + return ReadOnlySpecsValue{} +} + +var _ basetypes.ObjectValuable = ReadOnlySpecsValue{} + +type ReadOnlySpecsValue struct { + DiskSizeGb basetypes.Float64Value `tfsdk:"disk_size_gb"` + EbsVolumeType basetypes.StringValue `tfsdk:"ebs_volume_type"` + InstanceSize basetypes.StringValue `tfsdk:"instance_size"` + DiskIops basetypes.Int64Value `tfsdk:"disk_iops"` + NodeCount basetypes.Int64Value `tfsdk:"node_count"` + state attr.ValueState +} + +func (v ReadOnlySpecsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 5) + + var val tftypes.Value + var err error + + attrTypes["disk_iops"] = basetypes.Int64Type{}.TerraformType(ctx) + attrTypes["disk_size_gb"] = basetypes.Float64Type{}.TerraformType(ctx) + attrTypes["ebs_volume_type"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["instance_size"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["node_count"] = basetypes.Int64Type{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 5) + + val, err = v.DiskIops.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_iops"] = val + + val, err = v.DiskSizeGb.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["disk_size_gb"] = val + + val, err = v.EbsVolumeType.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["ebs_volume_type"] = val + + val, err = 
v.InstanceSize.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["instance_size"] = val + + val, err = v.NodeCount.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["node_count"] = val + + if err := tftypes.ValidateValue(objectType, vals); err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + return tftypes.NewValue(objectType, vals), nil + case attr.ValueStateNull: + return tftypes.NewValue(objectType, nil), nil + case attr.ValueStateUnknown: + return tftypes.NewValue(objectType, tftypes.UnknownValue), nil + default: + panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state)) + } +} + +func (v ReadOnlySpecsValue) IsNull() bool { + return v.state == attr.ValueStateNull +} + +func (v ReadOnlySpecsValue) IsUnknown() bool { + return v.state == attr.ValueStateUnknown +} + +func (v ReadOnlySpecsValue) String() string { + return "ReadOnlySpecsValue" +} + +func (v ReadOnlySpecsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) { + var diags diag.Diagnostics + + attributeTypes := map[string]attr.Type{ + "disk_iops": basetypes.Int64Type{}, + "disk_size_gb": basetypes.Float64Type{}, + "ebs_volume_type": basetypes.StringType{}, + "instance_size": basetypes.StringType{}, + "node_count": basetypes.Int64Type{}, + } + + if v.IsNull() { + return types.ObjectNull(attributeTypes), diags + } + + if v.IsUnknown() { + return types.ObjectUnknown(attributeTypes), diags + } + + objVal, diags := types.ObjectValue( + attributeTypes, + map[string]attr.Value{ + "disk_iops": v.DiskIops, + "disk_size_gb": v.DiskSizeGb, + "ebs_volume_type": v.EbsVolumeType, + "instance_size": v.InstanceSize, + "node_count": v.NodeCount, + }) + + return objVal, diags +} + +func (v ReadOnlySpecsValue) Equal(o attr.Value) bool { + other, ok := o.(ReadOnlySpecsValue) + + if !ok { + return false + } + + if v.state != other.state { + return false + } + + if v.state != attr.ValueStateKnown { + return true + } + + if !v.DiskIops.Equal(other.DiskIops) { + return false + } + + if !v.DiskSizeGb.Equal(other.DiskSizeGb) { + return false + } + + if !v.EbsVolumeType.Equal(other.EbsVolumeType) { + return false + } + + if !v.InstanceSize.Equal(other.InstanceSize) { + return false + } + + if !v.NodeCount.Equal(other.NodeCount) { + return false + } + + return true +} + +func (v ReadOnlySpecsValue) Type(ctx context.Context) attr.Type { + return ReadOnlySpecsType{ + basetypes.ObjectType{ + AttrTypes: v.AttributeTypes(ctx), + }, + } +} + +func (v ReadOnlySpecsValue) AttributeTypes(ctx context.Context) map[string]attr.Type { + return map[string]attr.Type{ + "disk_iops": basetypes.Int64Type{}, + "disk_size_gb": basetypes.Float64Type{}, + "ebs_volume_type": basetypes.StringType{}, + "instance_size": basetypes.StringType{}, + "node_count": basetypes.Int64Type{}, + } +} + +var _ basetypes.ObjectTypable = TagsType{} + +type TagsType struct { + basetypes.ObjectType +} + +func (t TagsType) Equal(o attr.Type) bool { + other, ok := o.(TagsType) + + if !ok { + return false + } + + return t.ObjectType.Equal(other.ObjectType) +} + +func (t TagsType) String() string { + return "TagsType" +} + +func (t TagsType) ValueFromObject(ctx context.Context, in basetypes.ObjectValue) (basetypes.ObjectValuable, diag.Diagnostics) { + var diags diag.Diagnostics + + attributes := in.Attributes() + + keyAttribute, ok := attributes["key"] + + if !ok { + 
diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return nil, diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return nil, diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return nil, diags + } + + return TagsValue{ + Key: keyVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewTagsValueNull() TagsValue { + return TagsValue{ + state: attr.ValueStateNull, + } +} + +func NewTagsValueUnknown() TagsValue { + return TagsValue{ + state: attr.ValueStateUnknown, + } +} + +func NewTagsValue(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) (TagsValue, diag.Diagnostics) { + var diags diag.Diagnostics + + // Reference: https://github.com/hashicorp/terraform-plugin-framework/issues/521 + ctx := context.Background() + + for name, attributeType := range attributeTypes { + attribute, ok := attributes[name] + + if !ok { + diags.AddError( + "Missing TagsValue Attribute Value", + "While creating a TagsValue value, a missing attribute value was detected. "+ + "A TagsValue must contain values for all attributes, even if null or unknown. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("TagsValue Attribute Name (%s) Expected Type: %s", name, attributeType.String()), + ) + + continue + } + + if !attributeType.Equal(attribute.Type(ctx)) { + diags.AddError( + "Invalid TagsValue Attribute Type", + "While creating a TagsValue value, an invalid attribute value was detected. "+ + "A TagsValue must use a matching attribute type for the value. "+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("TagsValue Attribute Name (%s) Expected Type: %s\n", name, attributeType.String())+ + fmt.Sprintf("TagsValue Attribute Name (%s) Given Type: %s", name, attribute.Type(ctx)), + ) + } + } + + for name := range attributes { + _, ok := attributeTypes[name] + + if !ok { + diags.AddError( + "Extra TagsValue Attribute Value", + "While creating a TagsValue value, an extra attribute value was detected. "+ + "A TagsValue must not contain values beyond the expected attribute types. 
"+ + "This is always an issue with the provider and should be reported to the provider developers.\n\n"+ + fmt.Sprintf("Extra TagsValue Attribute Name: %s", name), + ) + } + } + + if diags.HasError() { + return NewTagsValueUnknown(), diags + } + + keyAttribute, ok := attributes["key"] + + if !ok { + diags.AddError( + "Attribute Missing", + `key is missing from object`) + + return NewTagsValueUnknown(), diags + } + + keyVal, ok := keyAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`key expected to be basetypes.StringValue, was: %T`, keyAttribute)) + } + + valueAttribute, ok := attributes["value"] + + if !ok { + diags.AddError( + "Attribute Missing", + `value is missing from object`) + + return NewTagsValueUnknown(), diags + } + + valueVal, ok := valueAttribute.(basetypes.StringValue) + + if !ok { + diags.AddError( + "Attribute Wrong Type", + fmt.Sprintf(`value expected to be basetypes.StringValue, was: %T`, valueAttribute)) + } + + if diags.HasError() { + return NewTagsValueUnknown(), diags + } + + return TagsValue{ + Key: keyVal, + Value: valueVal, + state: attr.ValueStateKnown, + }, diags +} + +func NewTagsValueMust(attributeTypes map[string]attr.Type, attributes map[string]attr.Value) TagsValue { + object, diags := NewTagsValue(attributeTypes, attributes) + + if diags.HasError() { + // This could potentially be added to the diag package. + diagsStrings := make([]string, 0, len(diags)) + + for _, diagnostic := range diags { + diagsStrings = append(diagsStrings, fmt.Sprintf( + "%s | %s | %s", + diagnostic.Severity(), + diagnostic.Summary(), + diagnostic.Detail())) + } + + panic("NewTagsValueMust received error(s): " + strings.Join(diagsStrings, "\n")) + } + + return object +} + +func (t TagsType) ValueFromTerraform(ctx context.Context, in tftypes.Value) (attr.Value, error) { + if in.Type() == nil { + return NewTagsValueNull(), nil + } + + if !in.Type().Equal(t.TerraformType(ctx)) { + return nil, fmt.Errorf("expected %s, got %s", t.TerraformType(ctx), in.Type()) + } + + if !in.IsKnown() { + return NewTagsValueUnknown(), nil + } + + if in.IsNull() { + return NewTagsValueNull(), nil + } + + attributes := map[string]attr.Value{} + + val := map[string]tftypes.Value{} + + err := in.As(&val) + + if err != nil { + return nil, err + } + + for k, v := range val { + a, err := t.AttrTypes[k].ValueFromTerraform(ctx, v) + + if err != nil { + return nil, err + } + + attributes[k] = a + } + + return NewTagsValueMust(TagsValue{}.AttributeTypes(ctx), attributes), nil +} + +func (t TagsType) ValueType(ctx context.Context) attr.Value { + return TagsValue{} +} + +var _ basetypes.ObjectValuable = TagsValue{} + +type TagsValue struct { + Key basetypes.StringValue `tfsdk:"key"` + Value basetypes.StringValue `tfsdk:"value"` + state attr.ValueState +} + +func (v TagsValue) ToTerraformValue(ctx context.Context) (tftypes.Value, error) { + attrTypes := make(map[string]tftypes.Type, 2) + + var val tftypes.Value + var err error + + attrTypes["key"] = basetypes.StringType{}.TerraformType(ctx) + attrTypes["value"] = basetypes.StringType{}.TerraformType(ctx) + + objectType := tftypes.Object{AttributeTypes: attrTypes} + + switch v.state { + case attr.ValueStateKnown: + vals := make(map[string]tftypes.Value, 2) + + val, err = v.Key.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, tftypes.UnknownValue), err + } + + vals["key"] = val + + val, err = v.Value.ToTerraformValue(ctx) + + if err != nil { + return tftypes.NewValue(objectType, 
tftypes.UnknownValue), err
+		}
+
+		vals["value"] = val
+
+		if err := tftypes.ValidateValue(objectType, vals); err != nil {
+			return tftypes.NewValue(objectType, tftypes.UnknownValue), err
+		}
+
+		return tftypes.NewValue(objectType, vals), nil
+	case attr.ValueStateNull:
+		return tftypes.NewValue(objectType, nil), nil
+	case attr.ValueStateUnknown:
+		return tftypes.NewValue(objectType, tftypes.UnknownValue), nil
+	default:
+		panic(fmt.Sprintf("unhandled Object state in ToTerraformValue: %s", v.state))
+	}
+}
+
+func (v TagsValue) IsNull() bool {
+	return v.state == attr.ValueStateNull
+}
+
+func (v TagsValue) IsUnknown() bool {
+	return v.state == attr.ValueStateUnknown
+}
+
+func (v TagsValue) String() string {
+	return "TagsValue"
+}
+
+func (v TagsValue) ToObjectValue(ctx context.Context) (basetypes.ObjectValue, diag.Diagnostics) {
+	var diags diag.Diagnostics
+
+	attributeTypes := map[string]attr.Type{
+		"key":   basetypes.StringType{},
+		"value": basetypes.StringType{},
+	}
+
+	if v.IsNull() {
+		return types.ObjectNull(attributeTypes), diags
+	}
+
+	if v.IsUnknown() {
+		return types.ObjectUnknown(attributeTypes), diags
+	}
+
+	objVal, diags := types.ObjectValue(
+		attributeTypes,
+		map[string]attr.Value{
+			"key":   v.Key,
+			"value": v.Value,
+		})
+
+	return objVal, diags
+}
+
+func (v TagsValue) Equal(o attr.Value) bool {
+	other, ok := o.(TagsValue)
+
+	if !ok {
+		return false
+	}
+
+	if v.state != other.state {
+		return false
+	}
+
+	if v.state != attr.ValueStateKnown {
+		return true
+	}
+
+	if !v.Key.Equal(other.Key) {
+		return false
+	}
+
+	if !v.Value.Equal(other.Value) {
+		return false
+	}
+
+	return true
+}
+
+func (v TagsValue) Type(ctx context.Context) attr.Type {
+	return TagsType{
+		basetypes.ObjectType{
+			AttrTypes: v.AttributeTypes(ctx),
+		},
+	}
+}
+
+func (v TagsValue) AttributeTypes(ctx context.Context) map[string]attr.Type {
+	return map[string]attr.Type{
+		"key":   basetypes.StringType{},
+		"value": basetypes.StringType{},
+	}
+}
diff --git a/internal/service/advancedclustertpf/resource_test.go b/internal/service/advancedclustertpf/resource_test.go
new file mode 100644
index 0000000000..6faf9f99ea
--- /dev/null
+++ b/internal/service/advancedclustertpf/resource_test.go
@@ -0,0 +1,27 @@
+package advancedclustertpf_test
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
+
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
+)
+
+func TestAccAdvancedCluster_basic(t *testing.T) {
+	resource.ParallelTest(t, resource.TestCase{
+		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
+		Steps: []resource.TestStep{
+			{
+				Config: configBasic(),
+			},
+		},
+	})
+}
+
+func configBasic() string {
+	return `
+	resource "mongodbatlas_advanced_cluster" "test" {
+	}
+	`
+}
diff --git a/internal/service/advancedclustertpf/tfplugingen/generator_config.yml b/internal/service/advancedclustertpf/tfplugingen/generator_config.yml
new file mode 100644
index 0000000000..8f2c0df2f1
--- /dev/null
+++ b/internal/service/advancedclustertpf/tfplugingen/generator_config.yml
@@ -0,0 +1,21 @@
+provider:
+  name: mongodbatlas
+
+resources:
+  advanced_cluster:
+    read:
+      path: /api/atlas/v2/groups/{groupId}/clusters/{clusterName}
+      method: GET
+    create:
+      path: /api/atlas/v2/groups/{groupId}/clusters
+      method: POST
+
+data_sources:
+  advanced_cluster:
+    read:
+      path: /api/atlas/v2/groups/{groupId}/clusters/{clusterName}
+      method: GET
+  advanced_clusters:
+    read:
+      path: /api/atlas/v2/groups/{groupId}/clusters
+      method: GET
diff --git 
a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 8ce9343db3..d5cef4cb6f 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -22,7 +22,7 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { mig.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { @@ -109,7 +109,7 @@ func TestMigBackupRSCloudBackupSchedule_copySettings(t *testing.T) { checksUpdateWithZoneID := acc.AddAttrSetChecks(resourceName, checksCreate, "copy_settings.0.zone_id") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index a39c810386..4e5930aef6 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -25,7 +25,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -159,7 +159,7 @@ func TestAccBackupRSCloudBackupSchedule_export(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, @@ -188,7 +188,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -313,7 +313,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_repSpecId(t *testing.T) { checksUpdate := acc.AddAttrChecks(resourceName, checksDefaultRS, emptyCopySettingsChecks) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -399,7 +399,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_zoneId(t *testing.T) { checksUpdate := acc.AddAttrChecks(resourceName, checksDefaultRS, emptyCopySettingsChecks) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -429,7 +429,7 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: 
[]resource.TestStep{ @@ -484,7 +484,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go index 03675fe280..f9e50131c8 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/cluster" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "go.mongodb.org/atlas-sdk/v20240805004/admin" ) @@ -123,20 +123,12 @@ func Resource() *schema.Resource { } func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := meta.(*config.MongoDBClient).Atlas connV2 := meta.(*config.MongoDBClient).AtlasV2 groupID := d.Get("project_id").(string) clusterName := d.Get("cluster_name").(string) - stateConf := &retry.StateChangeConf{ - Pending: []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING"}, - Target: []string{"IDLE"}, - Refresh: cluster.ResourceClusterRefreshFunc(ctx, d.Get("cluster_name").(string), d.Get("project_id").(string), conn), - Timeout: 15 * time.Minute, - MinTimeout: 30 * time.Second, - } - _, err := stateConf.WaitForStateContext(ctx) - if err != nil { + stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, groupID, clusterName, 15*time.Minute) + if _, err := stateConf.WaitForStateContext(ctx); err != nil { return diag.FromErr(err) } @@ -155,7 +147,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
SnapshotId: snapshot.GetId(), } - stateConf = &retry.StateChangeConf{ + stateConf = retry.StateChangeConf{ Pending: []string{"queued", "inProgress"}, Target: []string{"completed", "failed"}, Refresh: resourceRefreshFunc(ctx, requestParams, connV2), diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go index 164cab06a7..3bac919f4b 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go @@ -17,7 +17,7 @@ func TestMigBackupRSCloudBackupSnapshot_basic(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { mig.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { @@ -51,7 +51,7 @@ func TestMigBackupRSCloudBackupSnapshot_sharded(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go index a6cb9dec02..31a121a232 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go @@ -26,7 +26,7 @@ func TestAccBackupRSCloudBackupSnapshot_basic(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -75,7 +75,7 @@ func TestAccBackupRSCloudBackupSnapshot_sharded(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go index 99125326f6..9785d63bc4 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go @@ -54,7 +54,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { checks = acc.AddAttrChecks(dataSourcePluralName, checks, attrsPluralDS) return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(tb) }, + PreCheck: acc.PreCheckBasicSleep(tb), ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index c9e6b97f8f..d70a05526b 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -43,7 +43,7 @@ func 
TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -75,7 +75,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { ) return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(tb) }, + PreCheck: acc.PreCheckBasicSleep(tb), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ diff --git a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go index ae8eb415cd..3af0198013 100644 --- a/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go +++ b/internal/service/cloudprovideraccess/resource_cloud_provider_access_authorization.go @@ -338,9 +338,13 @@ func featureUsagesSchema() *schema.Resource { } func featureToSchema(feature admin.CloudProviderAccessFeatureUsage) map[string]any { - featureID, _ := feature.GetFeatureId().ToMap() + featureID := feature.GetFeatureId() + featureIDMap := map[string]any{ + "project_id": featureID.GetGroupId(), + "bucket_name": featureID.GetBucketName(), + } return map[string]any{ "feature_type": feature.GetFeatureType(), - "feature_id": featureID, + "feature_id": featureIDMap, } } diff --git a/internal/service/cluster/data_source_cluster.go b/internal/service/cluster/data_source_cluster.go index 32b995875c..a1154e48ca 100644 --- a/internal/service/cluster/data_source_cluster.go +++ b/internal/service/cluster/data_source_cluster.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" matlas "go.mongodb.org/atlas/mongodbatlas" @@ -15,7 +14,7 @@ import ( func DataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasClusterRead, + ReadContext: dataSourceRead, Schema: map[string]*schema.Schema{ "project_id": { Type: schema.TypeString, @@ -287,9 +286,8 @@ func DataSource() *schema.Resource { Computed: true, }, "labels": { - Type: schema.TypeSet, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), + Type: schema.TypeSet, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -317,13 +315,17 @@ func DataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "redact_client_log_data": { + Type: schema.TypeBool, + Computed: true, + }, }, } } -func dataSourceMongoDBAtlasClusterRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. 
+func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).Atlas + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) clusterName := d.Get("name").(string) @@ -489,6 +491,14 @@ func dataSourceMongoDBAtlasClusterRead(ctx context.Context, d *schema.ResourceDa return diag.FromErr(err) } + redactClientLogData, err := newAtlasGet(ctx, connV2, projectID, clusterName) + if err != nil { + return diag.FromErr(fmt.Errorf(errorClusterRead, clusterName, err)) + } + if err := d.Set("redact_client_log_data", redactClientLogData); err != nil { + return diag.FromErr(fmt.Errorf(advancedcluster.ErrorClusterSetting, "redact_client_log_data", clusterName, err)) + } + d.SetId(cluster.ID) return nil diff --git a/internal/service/cluster/data_source_clusters.go b/internal/service/cluster/data_source_clusters.go index c5e8ac10da..7012274d78 100644 --- a/internal/service/cluster/data_source_clusters.go +++ b/internal/service/cluster/data_source_clusters.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" matlas "go.mongodb.org/atlas/mongodbatlas" @@ -17,7 +16,7 @@ import ( func PluralDataSource() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceMongoDBAtlasClustersRead, + ReadContext: dataSourcePluralRead, Schema: map[string]*schema.Schema{ "project_id": { Type: schema.TypeString, @@ -290,9 +289,8 @@ func PluralDataSource() *schema.Resource { Computed: true, }, "labels": { - Type: schema.TypeSet, - Computed: true, - Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), + Type: schema.TypeSet, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -312,11 +310,15 @@ func PluralDataSource() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "termination_protection_enabled": { + Type: schema.TypeBool, + Computed: true, + }, "version_release_system": { Type: schema.TypeString, Computed: true, }, - "termination_protection_enabled": { + "redact_client_log_data": { Type: schema.TypeBool, Computed: true, }, @@ -327,9 +329,9 @@ func PluralDataSource() *schema.Resource { } } -func dataSourceMongoDBAtlasClustersRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. 
+func dataSourcePluralRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).Atlas + connV2 := meta.(*config.MongoDBClient).AtlasV2 projectID := d.Get("project_id").(string) d.SetId(id.UniqueId()) @@ -338,18 +340,25 @@ func dataSourceMongoDBAtlasClustersRead(ctx context.Context, d *schema.ResourceD if resp != nil && resp.StatusCode == http.StatusNotFound { return nil } - return diag.FromErr(fmt.Errorf("error reading cluster list for project(%s): %s", projectID, err)) } - if err := d.Set("results", flattenClusters(ctx, d, conn, clusters)); err != nil { + redactClientLogDataMap, err := newAtlasList(ctx, connV2, projectID) + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil + } + return diag.FromErr(fmt.Errorf("error reading new cluster list for project(%s): %s", projectID, err)) + } + + if err := d.Set("results", flattenClusters(ctx, d, conn, clusters, redactClientLogDataMap)); err != nil { return diag.FromErr(fmt.Errorf(advancedcluster.ErrorClusterSetting, "results", d.Id(), err)) } return nil } -func flattenClusters(ctx context.Context, d *schema.ResourceData, conn *matlas.Client, clusters []matlas.Cluster) []map[string]any { +func flattenClusters(ctx context.Context, d *schema.ResourceData, conn *matlas.Client, clusters []matlas.Cluster, redactClientLogDataMap map[string]bool) []map[string]any { results := make([]map[string]any, 0) for i := range clusters { @@ -411,6 +420,7 @@ func flattenClusters(ctx context.Context, d *schema.ResourceData, conn *matlas.C "termination_protection_enabled": clusters[i].TerminationProtectionEnabled, "version_release_system": clusters[i].VersionReleaseSystem, "container_id": containerID, + "redact_client_log_data": redactClientLogDataMap[clusters[i].Name], } results = append(results, result) } diff --git a/internal/service/cluster/new_atlas.go b/internal/service/cluster/new_atlas.go new file mode 100644 index 0000000000..b6619c2807 --- /dev/null +++ b/internal/service/cluster/new_atlas.go @@ -0,0 +1,48 @@ +package cluster + +import ( + "context" + "time" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" + "go.mongodb.org/atlas-sdk/v20240805004/admin" +) + +func newAtlasUpdate(ctx context.Context, timeout time.Duration, connV2 *admin.APIClient, projectID, clusterName string, redactClientLogData bool) error { + current, err := newAtlasGet(ctx, connV2, projectID, clusterName) + if err != nil { + return err + } + if current == redactClientLogData { + return nil + } + req := &admin.ClusterDescription20240805{ + RedactClientLogData: &redactClientLogData, + } + if _, _, err = connV2.ClustersApi.UpdateCluster(ctx, projectID, clusterName, req).Execute(); err != nil { + return err + } + stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, clusterName, timeout) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { + return err + } + return nil +} + +func newAtlasGet(ctx context.Context, connV2 *admin.APIClient, projectID, clusterName string) (redactClientLogData bool, err error) { + cluster, _, err := connV2.ClustersApi.GetCluster(ctx, projectID, clusterName).Execute() + return cluster.GetRedactClientLogData(), err +} + +func newAtlasList(ctx context.Context, connV2 *admin.APIClient, projectID string) (map[string]bool, error) { + clusters, _, err := connV2.ClustersApi.ListClusters(ctx, projectID).Execute() + if err != nil { + return nil, err + } + results := clusters.GetResults() + list := 
make(map[string]bool) + for i := range results { + list[results[i].GetName()] = results[i].GetRedactClientLogData() + } + return list, nil +} diff --git a/internal/service/cluster/resource_cluster.go b/internal/service/cluster/resource_cluster.go index a462044b5c..b96f949a2e 100644 --- a/internal/service/cluster/resource_cluster.go +++ b/internal/service/cluster/resource_cluster.go @@ -9,9 +9,9 @@ import ( "net/http" "reflect" "regexp" - "strings" "time" + "go.mongodb.org/atlas-sdk/v20240805004/admin" matlas "go.mongodb.org/atlas/mongodbatlas" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -20,7 +20,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/spf13/cast" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/validate" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" @@ -40,12 +39,12 @@ var defaultLabel = matlas.Label{Key: "Infrastructure Tool", Value: "MongoDB Atla func Resource() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceMongoDBAtlasClusterCreate, - ReadWithoutTimeout: resourceMongoDBAtlasClusterRead, - UpdateWithoutTimeout: resourceMongoDBAtlasClusterUpdate, - DeleteWithoutTimeout: resourceMongoDBAtlasClusterDelete, + CreateWithoutTimeout: resourceCreate, + ReadWithoutTimeout: resourceRead, + UpdateWithoutTimeout: resourceUpdate, + DeleteWithoutTimeout: resourceDelete, Importer: &schema.ResourceImporter{ - StateContext: resourceMongoDBAtlasClusterImportState, + StateContext: resourceImport, }, SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ @@ -311,10 +310,9 @@ func Resource() *schema.Resource { }, "advanced_configuration": advancedcluster.SchemaAdvancedConfig(), "labels": { - Type: schema.TypeSet, - Optional: true, - Set: advancedcluster.HashFunctionForKeyValuePair, - Deprecated: fmt.Sprintf(constant.DeprecationParamFutureWithReplacement, "tags"), + Type: schema.TypeSet, + Optional: true, + Set: advancedcluster.HashFunctionForKeyValuePair, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "key": { @@ -351,6 +349,11 @@ func Resource() *schema.Resource { Computed: true, Description: "Submit this field alongside your topology reconfiguration to request a new regional outage resistant topology", }, + "redact_client_log_data": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, }, CustomizeDiff: resourceClusterCustomizeDiff, Timeouts: &schema.ResourceTimeout{ @@ -361,22 +364,24 @@ func Resource() *schema.Resource { } } -func resourceMongoDBAtlasClusterCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { +func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { if v, ok := d.GetOk("accept_data_risks_and_force_replica_set_reconfig"); ok { if v.(string) != "" { return diag.FromErr(fmt.Errorf("accept_data_risks_and_force_replica_set_reconfig can not be set in creation, only in update")) } } - conn := meta.(*config.MongoDBClient).Atlas - - projectID := d.Get("project_id").(string) - providerName := d.Get("provider_name").(string) - - computeEnabled := d.Get("auto_scaling_compute_enabled").(bool) - scaleDownEnabled := d.Get("auto_scaling_compute_scale_down_enabled").(bool) - minInstanceSize := d.Get("provider_auto_scaling_compute_min_instance_size").(string) - maxInstanceSize := 
d.Get("provider_auto_scaling_compute_max_instance_size").(string) + var ( + conn = meta.(*config.MongoDBClient).Atlas + connV2 = meta.(*config.MongoDBClient).AtlasV2 + projectID = d.Get("project_id").(string) + clusterName = d.Get("name").(string) + providerName = d.Get("provider_name").(string) + computeEnabled = d.Get("auto_scaling_compute_enabled").(bool) + scaleDownEnabled = d.Get("auto_scaling_compute_scale_down_enabled").(bool) + minInstanceSize = d.Get("provider_auto_scaling_compute_min_instance_size").(string) + maxInstanceSize = d.Get("provider_auto_scaling_compute_max_instance_size").(string) + ) if scaleDownEnabled && !computeEnabled { return diag.FromErr(fmt.Errorf("`auto_scaling_compute_scale_down_enabled` must be set when `auto_scaling_compute_enabled` is set")) @@ -474,7 +479,7 @@ func resourceMongoDBAtlasClusterCreate(ctx context.Context, d *schema.ResourceDa } clusterRequest := &matlas.Cluster{ - Name: d.Get("name").(string), + Name: clusterName, EncryptionAtRestProvider: d.Get("encryption_at_rest_provider").(string), ClusterType: clusterType, BackupEnabled: conversion.Pointer(d.Get("backup_enabled").(bool)), @@ -538,18 +543,8 @@ func resourceMongoDBAtlasClusterCreate(ctx context.Context, d *schema.ResourceDa } timeout := d.Timeout(schema.TimeoutCreate) - stateConf := &retry.StateChangeConf{ - Pending: []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"}, - Target: []string{"IDLE"}, - Refresh: ResourceClusterRefreshFunc(ctx, d.Get("name").(string), projectID, conn), - Timeout: timeout, - MinTimeout: 1 * time.Minute, - Delay: 3 * time.Minute, - } - - // Wait, catching any errors - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { + stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, clusterName, timeout) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return diag.FromErr(fmt.Errorf(errorClusterCreate, err)) } @@ -575,25 +570,31 @@ func resourceMongoDBAtlasClusterCreate(ctx context.Context, d *schema.ResourceDa Paused: conversion.Pointer(v), } - _, _, err = updateCluster(ctx, conn, clusterRequest, projectID, d.Get("name").(string), timeout) + _, _, err = updateCluster(ctx, conn, connV2, clusterRequest, projectID, clusterName, timeout) if err != nil { - return diag.FromErr(fmt.Errorf(errorClusterUpdate, d.Get("name").(string), err)) + return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) + } + } + + if v, ok := d.GetOk("redact_client_log_data"); ok { + if err := newAtlasUpdate(ctx, d.Timeout(schema.TimeoutCreate), connV2, projectID, clusterName, v.(bool)); err != nil { + return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) } } d.SetId(conversion.EncodeStateID(map[string]string{ "cluster_id": cluster.ID, "project_id": projectID, - "cluster_name": cluster.Name, + "cluster_name": clusterName, "provider_name": providerName, })) - return resourceMongoDBAtlasClusterRead(ctx, d, meta) + return resourceRead(ctx, d, meta) } -func resourceMongoDBAtlasClusterRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. 
+func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).Atlas + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] @@ -774,19 +775,31 @@ func resourceMongoDBAtlasClusterRead(ctx context.Context, d *schema.ResourceData return diag.FromErr(err) } + redactClientLogData, err := newAtlasGet(ctx, connV2, projectID, clusterName) + if err != nil { + return diag.FromErr(fmt.Errorf(errorClusterRead, clusterName, err)) + } + if err := d.Set("redact_client_log_data", redactClientLogData); err != nil { + return diag.FromErr(fmt.Errorf(advancedcluster.ErrorClusterSetting, "redact_client_log_data", clusterName, err)) + } + return nil } -func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. - conn := meta.(*config.MongoDBClient).Atlas - ids := conversion.DecodeStateID(d.Id()) - projectID := ids["project_id"] - clusterName := ids["cluster_name"] - - cluster := new(matlas.Cluster) - clusterChangeDetect := new(matlas.Cluster) - clusterChangeDetect.AutoScaling = &matlas.AutoScaling{Compute: &matlas.Compute{}} +func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + var ( + conn = meta.(*config.MongoDBClient).Atlas + connV2 = meta.(*config.MongoDBClient).AtlasV2 + ids = conversion.DecodeStateID(d.Id()) + projectID = ids["project_id"] + clusterName = ids["cluster_name"] + cluster = new(matlas.Cluster) + clusterChangeDetect = &matlas.Cluster{ + AutoScaling: &matlas.AutoScaling{ + Compute: &matlas.Compute{}, + }, + } + ) if d.HasChange("name") { cluster.Name, _ = d.Get("name").(string) @@ -942,7 +955,7 @@ func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceDa } if isUpgradeRequired(d) { - updatedCluster, _, err := upgradeCluster(ctx, conn, cluster, projectID, clusterName, timeout) + updatedCluster, _, err := upgradeCluster(ctx, conn, connV2, cluster, projectID, clusterName, timeout) if err != nil { return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) @@ -956,14 +969,14 @@ func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceDa })) } else if !reflect.DeepEqual(cluster, clusterChangeDetect) { err := retry.RetryContext(ctx, timeout, func() *retry.RetryError { - _, _, err := updateCluster(ctx, conn, cluster, projectID, clusterName, timeout) + _, _, err := updateCluster(ctx, conn, connV2, cluster, projectID, clusterName, timeout) if didErrOnPausedCluster(err) { clusterRequest := &matlas.Cluster{ Paused: conversion.Pointer(false), } - _, _, err = updateCluster(ctx, conn, clusterRequest, projectID, clusterName, timeout) + _, _, err = updateCluster(ctx, conn, connV2, clusterRequest, projectID, clusterName, timeout) } if err != nil { @@ -983,13 +996,20 @@ func resourceMongoDBAtlasClusterUpdate(ctx context.Context, d *schema.ResourceDa Paused: conversion.Pointer(true), } - _, _, err := updateCluster(ctx, conn, clusterRequest, projectID, clusterName, timeout) + _, _, err := updateCluster(ctx, conn, connV2, clusterRequest, projectID, clusterName, timeout) if err != nil { return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) } } - return resourceMongoDBAtlasClusterRead(ctx, d, meta) + if d.HasChange("redact_client_log_data") { + redactClientLogData := d.Get("redact_client_log_data").(bool) + if err := newAtlasUpdate(ctx, 
d.Timeout(schema.TimeoutUpdate), connV2, projectID, clusterName, redactClientLogData); err != nil { + return diag.FromErr(fmt.Errorf(errorClusterUpdate, clusterName, err)) + } + } + + return resourceRead(ctx, d, meta) } func IsMultiRegionCluster(repSpecs []matlas.ReplicationSpec) bool { @@ -1023,9 +1043,9 @@ func didErrOnPausedCluster(err error) bool { return errors.As(err, &target) && target.ErrorCode == "CANNOT_UPDATE_PAUSED_CLUSTER" } -func resourceMongoDBAtlasClusterDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get client connection. +func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*config.MongoDBClient).Atlas + connV2 := meta.(*config.MongoDBClient).AtlasV2 ids := conversion.DecodeStateID(d.Id()) projectID := ids["project_id"] clusterName := ids["cluster_name"] @@ -1042,27 +1062,15 @@ func resourceMongoDBAtlasClusterDelete(ctx context.Context, d *schema.ResourceDa return diag.FromErr(fmt.Errorf(errorClusterDelete, clusterName, err)) } - log.Println("[INFO] Waiting for MongoDB Cluster to be destroyed") - - stateConf := &retry.StateChangeConf{ - Pending: []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"}, - Target: []string{"DELETED"}, - Refresh: ResourceClusterRefreshFunc(ctx, clusterName, projectID, conn), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 30 * time.Second, - Delay: 1 * time.Minute, // Wait 30 secs before starting - } - - // Wait, catching any errors - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { + stateConf := advancedcluster.DeleteStateChangeConfig(ctx, connV2, projectID, clusterName, d.Timeout(schema.TimeoutDelete)) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return diag.FromErr(fmt.Errorf(errorClusterDelete, clusterName, err)) } return nil } -func resourceMongoDBAtlasClusterImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { +func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { conn := meta.(*config.MongoDBClient).Atlas projectID, name, err := splitSClusterImportID(d.Id()) @@ -1185,24 +1193,14 @@ func isEqualProviderAutoScalingMaxInstanceSize(k, old, newStr string, d *schema. 
return true } -func updateCluster(ctx context.Context, conn *matlas.Client, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) { +func updateCluster(ctx context.Context, conn *matlas.Client, connV2 *admin.APIClient, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) { cluster, resp, err := conn.Clusters.Update(ctx, projectID, name, request) if err != nil { return nil, nil, err } - stateConf := &retry.StateChangeConf{ - Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, - Target: []string{"IDLE"}, - Refresh: ResourceClusterRefreshFunc(ctx, name, projectID, conn), - Timeout: timeout, - MinTimeout: 30 * time.Second, - Delay: 1 * time.Minute, - } - - // Wait, catching any errors - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { + stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, name, timeout) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return nil, nil, err } @@ -1288,35 +1286,7 @@ func computedCloudProviderSnapshotBackupPolicySchema() *schema.Schema { } } -func ResourceClusterRefreshFunc(ctx context.Context, name, projectID string, conn *matlas.Client) retry.StateRefreshFunc { - return func() (any, string, error) { - c, resp, err := conn.Clusters.Get(ctx, projectID, name) - - if err != nil && strings.Contains(err.Error(), "reset by peer") { - return nil, "REPEATING", nil - } - - if err != nil && c == nil && resp == nil { - return nil, "", err - } else if err != nil { - if resp.StatusCode == 404 { - return "", "DELETED", nil - } - if resp.StatusCode == 503 { - return "", "PENDING", nil - } - return nil, "", err - } - - if c.StateName != "" { - log.Printf("[DEBUG] status for MongoDB cluster: %s: %s", name, c.StateName) - } - - return c, c.StateName, nil - } -} - -func upgradeCluster(ctx context.Context, conn *matlas.Client, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) { +func upgradeCluster(ctx context.Context, conn *matlas.Client, connV2 *admin.APIClient, request *matlas.Cluster, projectID, name string, timeout time.Duration) (*matlas.Cluster, *matlas.Response, error) { request.Name = name cluster, resp, err := conn.Clusters.Upgrade(ctx, projectID, request) @@ -1324,18 +1294,8 @@ func upgradeCluster(ctx context.Context, conn *matlas.Client, request *matlas.Cl return nil, nil, err } - stateConf := &retry.StateChangeConf{ - Pending: []string{"CREATING", "UPDATING", "REPAIRING"}, - Target: []string{"IDLE"}, - Refresh: ResourceClusterRefreshFunc(ctx, name, projectID, conn), - Timeout: timeout, - MinTimeout: 30 * time.Second, - Delay: 1 * time.Minute, - } - - // Wait, catching any errors - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { + stateConf := advancedcluster.CreateStateChangeConfig(ctx, connV2, projectID, name, timeout) + if _, err = stateConf.WaitForStateContext(ctx); err != nil { return nil, nil, err } diff --git a/internal/service/cluster/resource_cluster_test.go b/internal/service/cluster/resource_cluster_test.go index bcf8079c09..92199d7cfc 100644 --- a/internal/service/cluster/resource_cluster_test.go +++ b/internal/service/cluster/resource_cluster_test.go @@ -18,8 +18,9 @@ import ( ) const ( - resourceName = "mongodbatlas_cluster.test" - dataSourceName = "data.mongodbatlas_cluster.test" + resourceName = "mongodbatlas_cluster.test" + dataSourceName = "data.mongodbatlas_cluster.test" + dataSourcePluralName = 
"data.mongodbatlas_clusters.test" ) func TestAccCluster_basicAWS_simple(t *testing.T) { @@ -35,7 +36,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { ) return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(tb) }, + PreCheck: acc.PreCheckBasicSleep(tb), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -100,7 +101,7 @@ func partialAdvancedConfTestCase(tb testing.TB) *resource.TestCase { ) return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(tb) }, + PreCheck: acc.PreCheckBasicSleep(tb), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -162,7 +163,7 @@ func TestAccCluster_basic_DefaultWriteRead_AdvancedConf(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -221,7 +222,7 @@ func TestAccCluster_emptyAdvancedConf(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -263,7 +264,7 @@ func TestAccCluster_basicAdvancedConf(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -624,7 +625,7 @@ func TestAccCluster_AWSWithLabels(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -693,10 +694,9 @@ func TestAccCluster_AWSWithLabels(t *testing.T) { func TestAccCluster_WithTags(t *testing.T) { var ( - dataSourceClustersName = "data.mongodbatlas_clusters.test" - orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") - projectName = acc.RandomProjectName() // No ProjectIDExecution because this test has plural datasource - clusterName = acc.RandomClusterName() + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution because this test has plural datasource + clusterName = acc.RandomClusterName() ) resource.ParallelTest(t, resource.TestCase{ @@ -713,7 +713,7 @@ func TestAccCluster_WithTags(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), resource.TestCheckResourceAttr(resourceName, "tags.#", "0"), resource.TestCheckResourceAttr(dataSourceName, "tags.#", "0"), - resource.TestCheckResourceAttr(dataSourceClustersName, "results.0.tags.#", "0"), + resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.tags.#", "0"), ), }, { @@ -740,9 +740,9 @@ func TestAccCluster_WithTags(t *testing.T) { resource.TestCheckResourceAttr(dataSourceName, "tags.#", "2"), resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "tags.*", acc.ClusterTagsMap1), resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "tags.*", acc.ClusterTagsMap2), - resource.TestCheckResourceAttr(dataSourceClustersName, "results.0.tags.#", "2"), - resource.TestCheckTypeSetElemNestedAttrs(dataSourceClustersName, 
"results.0.tags.*", acc.ClusterTagsMap1), - resource.TestCheckTypeSetElemNestedAttrs(dataSourceClustersName, "results.0.tags.*", acc.ClusterTagsMap2), + resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.tags.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(dataSourcePluralName, "results.0.tags.*", acc.ClusterTagsMap1), + resource.TestCheckTypeSetElemNestedAttrs(dataSourcePluralName, "results.0.tags.*", acc.ClusterTagsMap2), ), }, { @@ -763,8 +763,8 @@ func TestAccCluster_WithTags(t *testing.T) { resource.TestCheckTypeSetElemNestedAttrs(resourceName, "tags.*", acc.ClusterTagsMap3), resource.TestCheckResourceAttr(dataSourceName, "tags.#", "1"), resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "tags.*", acc.ClusterTagsMap3), - resource.TestCheckResourceAttr(dataSourceClustersName, "results.0.tags.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(dataSourceClustersName, "results.0.tags.*", acc.ClusterTagsMap3), + resource.TestCheckResourceAttr(dataSourcePluralName, "results.0.tags.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(dataSourcePluralName, "results.0.tags.*", acc.ClusterTagsMap3), ), }, }, @@ -986,7 +986,7 @@ func TestAccCluster_withAutoScalingAWS(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -1267,7 +1267,7 @@ func TestAccCluster_basicAWS_UnpauseToPaused(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -1315,7 +1315,7 @@ func TestAccCluster_basicAWS_PausedToUnpaused(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -1349,6 +1349,63 @@ func TestAccCluster_basicAWS_PausedToUnpaused(t *testing.T) { }) } +func TestAccCluster_basic_RedactClientLogData(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution so redactClientLogData tests can be run in parallel because plural data source + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configRedactClientLogData(orgID, projectName, clusterName, nil), + Check: acc.CheckRSAndDS(resourceName, conversion.Pointer(dataSourceName), conversion.Pointer(dataSourcePluralName), + nil, map[string]string{"redact_client_log_data": "false"}), + }, + { + Config: configRedactClientLogData(orgID, projectName, clusterName, conversion.Pointer(false)), + Check: acc.CheckRSAndDS(resourceName, conversion.Pointer(dataSourceName), conversion.Pointer(dataSourcePluralName), + nil, map[string]string{"redact_client_log_data": "false"}), + }, + { + Config: configRedactClientLogData(orgID, projectName, clusterName, conversion.Pointer(true)), + Check: acc.CheckRSAndDS(resourceName, conversion.Pointer(dataSourceName), conversion.Pointer(dataSourcePluralName), + nil, 
map[string]string{"redact_client_log_data": "true"}), + }, + { + Config: configRedactClientLogData(orgID, projectName, clusterName, conversion.Pointer(false)), + Check: acc.CheckRSAndDS(resourceName, conversion.Pointer(dataSourceName), conversion.Pointer(dataSourcePluralName), + nil, map[string]string{"redact_client_log_data": "false"}), + }, + }, + }) +} + +func TestAccCluster_create_RedactClientLogData(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = acc.RandomProjectName() // No ProjectIDExecution so redactClientLogData tests can be run in parallel because plural data source + clusterName = acc.RandomClusterName() + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: acc.CheckDestroyCluster, + Steps: []resource.TestStep{ + { + Config: configRedactClientLogData(orgID, projectName, clusterName, conversion.Pointer(true)), + Check: acc.CheckRSAndDS(resourceName, conversion.Pointer(dataSourceName), conversion.Pointer(dataSourcePluralName), + nil, map[string]string{"redact_client_log_data": "true"}), + }, + }, + }) +} + func testAccGetMongoDBAtlasMajorVersion() string { conn, _ := matlas.New(http.DefaultClient, matlas.SetBaseURL(matlas.CloudURL)) majorVersion, _, _ := conn.DefaultMongoDBMajorVersion.Get(context.Background()) @@ -1788,6 +1845,49 @@ func configTenantUpdated(orgID, projectName, name string) string { `, orgID, projectName, name) } +func configRedactClientLogData(orgID, projectName, clusterName string, redactClientLogData *bool) string { + var additionalStr string + if redactClientLogData != nil { + additionalStr = fmt.Sprintf(`redact_client_log_data = %t`, *redactClientLogData) + } + + return fmt.Sprintf(` + resource "mongodbatlas_project" "test" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_cluster" "test" { + project_id = mongodbatlas_project.test.id + name = %[3]q + cluster_type = "REPLICASET" + replication_specs { + num_shards = 1 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + provider_name = "AWS" + provider_instance_size_name = "M10" + %[4]s + } + + data "mongodbatlas_cluster" "test" { + project_id = mongodbatlas_cluster.test.project_id + name = mongodbatlas_cluster.test.name + depends_on = ["mongodbatlas_cluster.test"] + } + + data "mongodbatlas_clusters" "test" { + project_id = mongodbatlas_cluster.test.project_id + depends_on = ["mongodbatlas_cluster.test"] + } + `, orgID, projectName, clusterName, additionalStr) +} + func testAccMongoDBAtlasClusterAWSConfigdWithLabels(projectID, name, backupEnabled, tier, region string, labels []matlas.Label) string { var labelsConf string for _, label := range labels { diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go index cd0eb7dae5..1976c09d56 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go @@ -32,7 +32,7 @@ func singleRegionTestCase(t *testing.T) *resource.TestCase { clusterName = clusterInfo.Name ) return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -80,7
+80,7 @@ func multiRegionTestCase(t *testing.T) *resource.TestCase { ) return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ diff --git a/internal/service/controlplaneipaddresses/data_source_schema.go b/internal/service/controlplaneipaddresses/data_source_schema.go index 1afc3dac0a..823fb7a162 100644 --- a/internal/service/controlplaneipaddresses/data_source_schema.go +++ b/internal/service/controlplaneipaddresses/data_source_schema.go @@ -1,5 +1,3 @@ -// Code generated by terraform-plugin-framework-generator DO NOT EDIT. - package controlplaneipaddresses import ( diff --git a/internal/service/encryptionatrest/resource.go b/internal/service/encryptionatrest/resource.go index e2599fc51e..80947ea814 100644 --- a/internal/service/encryptionatrest/resource.go +++ b/internal/service/encryptionatrest/resource.go @@ -384,12 +384,8 @@ func (r *encryptionAtRestRS) Update(ctx context.Context, req resource.UpdateRequ return } projectID := encryptionAtRestState.ProjectID.ValueString() - atlasEncryptionAtRest, atlasResp, err := connV2.EncryptionAtRestUsingCustomerKeyManagementApi.GetEncryptionAtRest(context.Background(), projectID).Execute() + atlasEncryptionAtRest, _, err := connV2.EncryptionAtRestUsingCustomerKeyManagementApi.GetEncryptionAtRest(context.Background(), projectID).Execute() if err != nil { - if resp != nil && atlasResp.StatusCode == http.StatusNotFound { - resp.State.RemoveResource(ctx) - return - } resp.Diagnostics.AddError("error when getting encryption at rest resource during update", fmt.Sprintf(project.ErrorProjectRead, projectID, err.Error())) return } diff --git a/internal/service/eventtrigger/resource_event_trigger.go b/internal/service/eventtrigger/resource_event_trigger.go index 0aa40f2b27..ee02d7f1f3 100644 --- a/internal/service/eventtrigger/resource_event_trigger.go +++ b/internal/service/eventtrigger/resource_event_trigger.go @@ -402,7 +402,6 @@ func resourceMongoDBAtlasEventTriggersRead(ctx context.Context, d *schema.Resour } func resourceMongoDBAtlasEventTriggersUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get the client connection. 
conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) if err != nil { return diag.FromErr(err) @@ -418,12 +417,10 @@ func resourceMongoDBAtlasEventTriggersUpdate(ctx context.Context, d *schema.Reso Name: d.Get("name").(string), Type: typeTrigger, FunctionID: d.Get("function_id").(string), + Disabled: conversion.Pointer(d.Get("disabled").(bool)), } eventTriggerConfig := &realm.EventTriggerConfig{} - if d.HasChange("disabled") { - eventReq.Disabled = conversion.Pointer(d.Get("disabled").(bool)) - } if typeTrigger == "DATABASE" { eventTriggerConfig.OperationTypes = cast.ToStringSlice(d.Get("config_operation_types")) eventTriggerConfig.Database = d.Get("config_database").(string) diff --git a/internal/service/eventtrigger/resource_event_trigger_test.go b/internal/service/eventtrigger/resource_event_trigger_test.go index 769d0d33b5..30f2c2734d 100644 --- a/internal/service/eventtrigger/resource_event_trigger_test.go +++ b/internal/service/eventtrigger/resource_event_trigger_test.go @@ -26,7 +26,7 @@ func TestAccEventTrigger_basic(t *testing.T) { Name: acc.RandomName(), Type: "DATABASE", FunctionID: os.Getenv("MONGODB_REALM_FUNCTION_ID"), - Disabled: conversion.Pointer(false), + Disabled: conversion.Pointer(true), Config: &realm.EventTriggerConfig{ OperationTypes: []string{"INSERT", "UPDATE"}, Database: "sample_airbnb", @@ -39,7 +39,7 @@ func TestAccEventTrigger_basic(t *testing.T) { Name: acc.RandomName(), Type: "DATABASE", FunctionID: os.Getenv("MONGODB_REALM_FUNCTION_ID"), - Disabled: conversion.Pointer(false), + Disabled: conversion.Pointer(true), Config: &realm.EventTriggerConfig{ OperationTypes: []string{"INSERT", "UPDATE", "DELETE"}, Database: "sample_airbnb", @@ -58,6 +58,7 @@ func TestAccEventTrigger_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), + resource.TestCheckResourceAttr(resourceName, "disabled", "true"), ), }, { @@ -65,6 +66,7 @@ func TestAccEventTrigger_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "project_id", projectID), + resource.TestCheckResourceAttr(resourceName, "disabled", "true"), ), }, { diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go index 41781f07a0..124ca55523 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go @@ -123,7 +123,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), CheckDestroy: acc.CheckDestroyFederatedDatabaseInstance, Steps: []resource.TestStep{ { diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index e2760e5a81..0b53bd5794 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -41,7 +41,7 @@ func basicTestCase(tb testing.TB, checkZoneID, withBackup bool) *resource.TestCa } return &resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(tb) }, + PreCheck: 
acc.PreCheckBasicSleep(tb), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ @@ -113,7 +113,7 @@ func TestAccGlobalClusterConfig_database(t *testing.T) { ) resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, Steps: []resource.TestStep{ diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go index 5fb300be5c..8dd34facfe 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go @@ -41,7 +41,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { ) resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckLDAPCert(t) }, + PreCheck: func() { acc.PreCheckLDAPCert(t); acc.SerialSleep(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { diff --git a/internal/service/onlinearchive/resource_online_archive_migration_test.go b/internal/service/onlinearchive/resource_online_archive_migration_test.go index 6035a59544..7d380bb8da 100644 --- a/internal/service/onlinearchive/resource_online_archive_migration_test.go +++ b/internal/service/onlinearchive/resource_online_archive_migration_test.go @@ -25,7 +25,7 @@ func TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { config := configWithDailySchedule(clusterTerraformStr, clusterResourceName, 1, deleteExpirationDays) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { mig.PreCheckBasic(t) }, + PreCheck: mig.PreCheckBasicSleep(t), CheckDestroy: acc.CheckDestroyFederatedDatabaseInstance, Steps: []resource.TestStep{ { diff --git a/internal/service/onlinearchive/resource_online_archive_test.go b/internal/service/onlinearchive/resource_online_archive_test.go index 5f2e95b16d..8464ff3713 100644 --- a/internal/service/onlinearchive/resource_online_archive_test.go +++ b/internal/service/onlinearchive/resource_online_archive_test.go @@ -37,7 +37,7 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -135,7 +135,7 @@ func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -184,7 +184,7 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ @@ -227,7 +227,7 @@ func TestAccBackupRSOnlineArchiveInvalidProcessRegion(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckBasic(t) }, + PreCheck: acc.PreCheckBasicSleep(t), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyCluster, Steps: 
[]resource.TestStep{ diff --git a/internal/service/organization/resource_organization.go b/internal/service/organization/resource_organization.go index 60dbb90919..d045408b69 100644 --- a/internal/service/organization/resource_organization.go +++ b/internal/service/organization/resource_organization.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "net/http" + "strings" "go.mongodb.org/atlas-sdk/v20240805004/admin" @@ -88,7 +89,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. conn := meta.(*config.MongoDBClient).AtlasV2 organization, resp, err := conn.OrganizationsApi.CreateOrganization(ctx, newCreateOrganizationRequest(d)).Execute() if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { + if resp.StatusCode == http.StatusNotFound && !strings.Contains(err.Error(), "USER_NOT_FOUND") { d.SetId("") return nil } diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index e2247ac0da..2fe2855989 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -143,6 +143,24 @@ func TestAccConfigRSOrganization_Settings(t *testing.T) { }) } +func TestAccOrganizationCreate_Errors(t *testing.T) { + var ( + roleName = "ORG_OWNER" + unknownUser = "65def6160f722a1507105aaa" + ) + acc.SkipTestForCI(t) // test will fail in CI since API_KEY_MUST_BE_ASSOCIATED_WITH_PAYING_ORG is returned + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.PreCheck(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Steps: []resource.TestStep{ + { + Config: configBasic(unknownUser, acc.RandomName(), "should fail since user is not found", roleName), + ExpectError: regexp.MustCompile(`USER_NOT_FOUND`), + }, + }, + }) +} + func checkExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go index 27c490da81..614451fdc5 100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go @@ -41,7 +41,7 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { ) resource.Test(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckAwsEnvBasic(t); acc.PreCheckAwsRegionCases(t) }, + PreCheck: func() { acc.PreCheckAwsEnvBasic(t); acc.PreCheckAwsRegionCases(t); acc.SerialSleep(t) }, ExternalProviders: acc.ExternalProvidersOnlyAWS(), ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: checkDestroy, diff --git a/internal/service/project/resource_project.go b/internal/service/project/resource_project.go index 3d0b9d83b2..4f49d5347d 100644 --- a/internal/service/project/resource_project.go +++ b/internal/service/project/resource_project.go @@ -402,12 +402,8 @@ func (r *projectRS) Create(ctx context.Context, req resource.CreateRequest, resp } projectID := project.GetId() - projectRes, atlasResp, err := connV2.ProjectsApi.GetProject(ctx, projectID).Execute() + projectRes, _, err := connV2.ProjectsApi.GetProject(ctx, projectID).Execute() if err != nil { - if resp != nil && atlasResp.StatusCode == http.StatusNotFound { - resp.State.RemoveResource(ctx) - return - } 
resp.Diagnostics.AddError("error when getting project after create", fmt.Sprintf(ErrorProjectRead, projectID, err.Error())) return } @@ -529,12 +525,8 @@ func (r *projectRS) Update(ctx context.Context, req resource.UpdateRequest, resp return } - projectRes, atlasResp, err := connV2.ProjectsApi.GetProject(ctx, projectID).Execute() + projectRes, _, err := connV2.ProjectsApi.GetProject(ctx, projectID).Execute() if err != nil { - if resp != nil && atlasResp.StatusCode == http.StatusNotFound { - resp.State.RemoveResource(ctx) - return - } resp.Diagnostics.AddError("error when getting project after create", fmt.Sprintf(ErrorProjectRead, projectID, err.Error())) return } diff --git a/internal/service/projectipaddresses/data_source_schema.go b/internal/service/projectipaddresses/data_source_schema.go index b4ef487fad..96afd0f2e4 100644 --- a/internal/service/projectipaddresses/data_source_schema.go +++ b/internal/service/projectipaddresses/data_source_schema.go @@ -1,5 +1,3 @@ -// Code generated by terraform-plugin-framework-generator DO NOT EDIT. - package projectipaddresses import ( diff --git a/internal/service/pushbasedlogexport/resource.go b/internal/service/pushbasedlogexport/resource.go index a07850f4fd..bcccf344cd 100644 --- a/internal/service/pushbasedlogexport/resource.go +++ b/internal/service/pushbasedlogexport/resource.go @@ -101,8 +101,6 @@ func (r *pushBasedLogExportRS) Create(ctx context.Context, req resource.CreateRe resp.Diagnostics.AddError("Error when unconfiguring push-based log export configuration", err.Error()) return } - - resp.State.RemoveResource(ctx) return } @@ -121,8 +119,6 @@ func (r *pushBasedLogExportRS) Create(ctx context.Context, req resource.CreateRe resp.Diagnostics.AddError("Error when unconfiguring push-based log export configuration", err.Error()) return } - - resp.State.RemoveResource(ctx) return } diff --git a/internal/service/resourcepolicy/data_source.go b/internal/service/resourcepolicy/data_source.go new file mode 100644 index 0000000000..c77a7f9bfa --- /dev/null +++ b/internal/service/resourcepolicy/data_source.go @@ -0,0 +1,53 @@ +package resourcepolicy + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" +) + +var _ datasource.DataSource = &resourcePolicyDS{} +var _ datasource.DataSourceWithConfigure = &resourcePolicyDS{} + +const ( + errorReadDS = "error reading data source " + fullResourceName +) + +func DataSource() datasource.DataSource { + return &resourcePolicyDS{ + DSCommon: config.DSCommon{ + DataSourceName: resourceName, + }, + } +} + +type resourcePolicyDS struct { + config.DSCommon +} + +func (d *resourcePolicyDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = DataSourceSchema(ctx) +} + +func (d *resourcePolicyDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var cfg TFModel + resp.Diagnostics.Append(req.Config.Get(ctx, &cfg)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := d.Client.AtlasV2 + apiResp, _, err := connV2.ResourcePoliciesApi.GetAtlasResourcePolicy(ctx, cfg.OrgID.ValueString(), cfg.ID.ValueString()).Execute() + if err != nil { + resp.Diagnostics.AddError(errorReadDS, err.Error()) + return + } + + out, diags := NewTFModel(ctx, apiResp) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, out)...) 
+} diff --git a/internal/service/resourcepolicy/data_source_plural.go b/internal/service/resourcepolicy/data_source_plural.go new file mode 100644 index 0000000000..1e65e34ec7 --- /dev/null +++ b/internal/service/resourcepolicy/data_source_plural.go @@ -0,0 +1,57 @@ +package resourcepolicy + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" +) + +var _ datasource.DataSource = &resourcePolicysDS{} +var _ datasource.DataSourceWithConfigure = &resourcePolicysDS{} + +const ( + dataSourcePluralName = "resource_policies" + fullDataSourcePluralName = "mongodbatlas_" + dataSourcePluralName + errorReadDSP = "error reading plural data source " + fullDataSourcePluralName +) + +func PluralDataSource() datasource.DataSource { + return &resourcePolicysDS{ + DSCommon: config.DSCommon{ + DataSourceName: dataSourcePluralName, + }, + } +} + +type resourcePolicysDS struct { + config.DSCommon +} + +func (d *resourcePolicysDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = DataSourcePluralSchema(ctx) +} + +func (d *resourcePolicysDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var cfg TFModelDSP + resp.Diagnostics.Append(req.Config.Get(ctx, &cfg)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := d.Client.AtlasV2 + orgID := cfg.OrgID.ValueString() + apiResp, _, err := connV2.ResourcePoliciesApi.GetAtlasResourcePolicies(ctx, orgID).Execute() + + if err != nil { + resp.Diagnostics.AddError(errorReadDSP, err.Error()) + return + } + + newResourcePolicysModel, diags := NewTFModelDSP(ctx, orgID, apiResp) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newResourcePolicysModel)...) +} diff --git a/internal/service/resourcepolicy/data_source_plural_schema.go b/internal/service/resourcepolicy/data_source_plural_schema.go new file mode 100644 index 0000000000..1efd8a3aa6 --- /dev/null +++ b/internal/service/resourcepolicy/data_source_plural_schema.go @@ -0,0 +1,33 @@ +package resourcepolicy + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func DataSourcePluralSchema(ctx context.Context) schema.Schema { + dsAttributes := dataSourceSchema(true) + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "org_id": schema.StringAttribute{ + Required: true, + Description: "Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the organization that contains your projects. 
Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access.", + }, + "resource_policies": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: dsAttributes, + }, + Computed: true, + }, + }, + } +} + +type TFModelDSP struct { + OrgID types.String `tfsdk:"org_id"` + ResourcePolicies []TFModel `tfsdk:"resource_policies"` +} diff --git a/internal/service/resourcepolicy/data_source_schema.go b/internal/service/resourcepolicy/data_source_schema.go new file mode 100644 index 0000000000..6c888e4f43 --- /dev/null +++ b/internal/service/resourcepolicy/data_source_schema.go @@ -0,0 +1,102 @@ +package resourcepolicy + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func DataSourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: dataSourceSchema(false), + } +} + +func dataSourceSchema(isPlural bool) map[string]schema.Attribute { + return map[string]schema.Attribute{ + "created_by_user": schema.SingleNestedAttribute{ + Description: "The user that last updated the Atlas resource policy.", + MarkdownDescription: "The user that last updated the Atlas resource policy.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal character string that identifies a user.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies a user.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Human-readable label that describes a user.", + MarkdownDescription: "Human-readable label that describes a user.", + Computed: true, + }, + }, + }, + "created_date": schema.StringAttribute{ + Description: "Date and time in UTC when the Atlas resource policy was created.", + MarkdownDescription: "Date and time in UTC when the Atlas resource policy was created.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal digit string that identifies an Atlas resource policy.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies an Atlas resource policy.", + Required: !isPlural, + Computed: isPlural, + }, + "last_updated_by_user": schema.SingleNestedAttribute{ + Description: "The user that last updated the Atlas resource policy.", + MarkdownDescription: "The user that last updated the Atlas resource policy.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal character string that identifies a user.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies a user.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Human-readable label that describes a user.", + MarkdownDescription: "Human-readable label that describes a user.", + Computed: true, + }, + }, + }, + "last_updated_date": schema.StringAttribute{ + Description: "Date and time in UTC when the Atlas resource policy was last updated.", + MarkdownDescription: "Date and time in UTC when the Atlas resource policy was last updated.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Human-readable label that describes the Atlas resource policy.", + MarkdownDescription: "Human-readable label that describes the Atlas resource policy.", + Computed: true, + }, + "org_id": schema.StringAttribute{ + Required: !isPlural, + Computed: isPlural, + Description: 
"Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access.", + }, + "policies": schema.ListNestedAttribute{ + Description: "List of policies that make up the Atlas resource policy.", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "body": schema.StringAttribute{ + Description: "A string that defines the permissions for the policy. The syntax used is the Cedar Policy language.", + MarkdownDescription: "A string that defines the permissions for the policy. The syntax used is the Cedar Policy language.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal character string that identifies the policy.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies the policy.", + Computed: true, + }, + }, + }, + }, + "version": schema.StringAttribute{ + Description: "A string that identifies the version of the Atlas resource policy.", + MarkdownDescription: "A string that identifies the version of the Atlas resource policy.", + Computed: true, + }, + } +} diff --git a/internal/service/resourcepolicy/main_test.go b/internal/service/resourcepolicy/main_test.go new file mode 100644 index 0000000000..5bc9a5e537 --- /dev/null +++ b/internal/service/resourcepolicy/main_test.go @@ -0,0 +1,15 @@ +package resourcepolicy_test + +import ( + "os" + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" +) + +func TestMain(m *testing.M) { + cleanup := acc.SetupSharedResources() + exitCode := m.Run() + cleanup() + os.Exit(exitCode) +} diff --git a/internal/service/resourcepolicy/model.go b/internal/service/resourcepolicy/model.go new file mode 100644 index 0000000000..0e65314c15 --- /dev/null +++ b/internal/service/resourcepolicy/model.go @@ -0,0 +1,87 @@ +package resourcepolicy + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "go.mongodb.org/atlas-sdk/v20240805004/admin" +) + +func NewTFModel(ctx context.Context, input *admin.ApiAtlasResourcePolicy) (*TFModel, diag.Diagnostics) { + diags := &diag.Diagnostics{} + createdByUser := NewUserMetadataObjectType(ctx, input.CreatedByUser, diags) + lastUpdatedByUser := NewUserMetadataObjectType(ctx, input.LastUpdatedByUser, diags) + policies := NewTFPolicies(ctx, input.Policies, diags) + if diags.HasError() { + return nil, *diags + } + return &TFModel{ + CreatedByUser: createdByUser, + CreatedDate: types.StringPointerValue(conversion.TimePtrToStringPtr(input.CreatedDate)), + ID: types.StringPointerValue(input.Id), + LastUpdatedByUser: lastUpdatedByUser, + LastUpdatedDate: types.StringPointerValue(conversion.TimePtrToStringPtr(input.LastUpdatedDate)), + Name: types.StringPointerValue(input.Name), + OrgID: types.StringPointerValue(input.OrgId), + Policies: policies, + Version: types.StringPointerValue(input.Version), + }, nil +} + +func NewUserMetadataObjectType(ctx context.Context, input 
*admin.ApiAtlasUserMetadata, diags *diag.Diagnostics) types.Object { + var nilPointer *admin.ApiAtlasUserMetadata + if input == nilPointer { + return types.ObjectNull(UserMetadataObjectType.AttrTypes) + } + tfModel := TFUserMetadataModel{ + ID: types.StringPointerValue(input.Id), + Name: types.StringPointerValue(input.Name), + } + objType, diagsLocal := types.ObjectValueFrom(ctx, UserMetadataObjectType.AttrTypes, tfModel) + diags.Append(diagsLocal...) + return objType +} + +func NewTFPolicies(ctx context.Context, input *[]admin.ApiAtlasPolicy, diags *diag.Diagnostics) []TFPolicyModel { + var nilPointer *[]admin.ApiAtlasPolicy + if input == nilPointer { + return []TFPolicyModel{} + } + tfModels := make([]TFPolicyModel, len(*input)) + for i, item := range *input { + tfModels[i] = TFPolicyModel{ + Body: types.StringPointerValue(item.Body), + ID: types.StringPointerValue(item.Id), + } + } + return tfModels +} + +func NewAdminPolicies(ctx context.Context, input []TFPolicyModel) []admin.ApiAtlasPolicyCreate { + apiModels := make([]admin.ApiAtlasPolicyCreate, len(input)) + for i, item := range input { + apiModels[i] = admin.ApiAtlasPolicyCreate{ + Body: item.Body.ValueString(), + } + } + return apiModels +} + +func NewTFModelDSP(ctx context.Context, orgID string, input []admin.ApiAtlasResourcePolicy) (*TFModelDSP, diag.Diagnostics) { + diags := &diag.Diagnostics{} + tfModels := make([]TFModel, len(input)) + for i, item := range input { + tfModel, diagsLocal := NewTFModel(ctx, &item) + diags.Append(diagsLocal...) + tfModels[i] = *tfModel + } + if diags.HasError() { + return nil, *diags + } + return &TFModelDSP{ + ResourcePolicies: tfModels, + OrgID: types.StringValue(orgID), + }, *diags +} diff --git a/internal/service/resourcepolicy/model_test.go b/internal/service/resourcepolicy/model_test.go new file mode 100644 index 0000000000..6d1a4d23d4 --- /dev/null +++ b/internal/service/resourcepolicy/model_test.go @@ -0,0 +1,184 @@ +package resourcepolicy_test + +import ( + "context" + _ "embed" + "encoding/json" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/resourcepolicy" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/unit" + "github.com/stretchr/testify/assert" + "go.mongodb.org/atlas-sdk/v20240805004/admin" +) + +var ( + //go:embed testdata/policy_clusterForbidCloudProvider.json + clusterForbidCloudProviderJSON string + //go:embed testdata/policy_multipleEntries.json + policyMultipleEntriesJSON string +) + +type tfModelTestCase struct { + name string + SDKRespJSON string + userIDCreate string + userIDUpdate string + userNameCreate string + userNameUpdate string + createdDate string + lastUpdatedDate string + orgID string + policyID string + version string +} + +func (tc *tfModelTestCase) addDefaults() { + if tc.userIDCreate == "" { + tc.userIDCreate = "65def6f00f722a1507105ad8" + } + if tc.userIDUpdate == "" { + tc.userIDUpdate = "65def6f00f722a1507105ad8" + } + if tc.userNameCreate == "" { + tc.userNameCreate = "mvccpeou" + } + if tc.userNameUpdate == "" { + tc.userNameUpdate = "mvccpeou" + } + if tc.createdDate == "" { + tc.createdDate = "2024-09-10T14:59:34Z" + } + if tc.lastUpdatedDate == "" { + tc.lastUpdatedDate = "2024-09-10T14:59:35Z" + } + if tc.orgID == "" { + tc.orgID = "65def6ce0f722a1507105aa5" + } + if tc.policyID == "" { + tc.policyID = "66e05ed6680f032312b6b22b" + } + if tc.version == "" { + tc.version = 
"v1" + } +} + +func parseSDKModel(t *testing.T, sdkRespJSON string) admin.ApiAtlasResourcePolicy { + t.Helper() + var SDKModel admin.ApiAtlasResourcePolicy + err := json.Unmarshal([]byte(sdkRespJSON), &SDKModel) + if err != nil { + t.Fatalf("failed to unmarshal sdk response: %s", err) + } + return SDKModel +} + +func createTFModel(t *testing.T, testCase *tfModelTestCase) *resourcepolicy.TFModel { + t.Helper() + testCase.addDefaults() + adminModel := parseSDKModel(t, testCase.SDKRespJSON) + policies := make([]resourcepolicy.TFPolicyModel, len(adminModel.GetPolicies())) + for i, policy := range adminModel.GetPolicies() { + policies[i] = resourcepolicy.TFPolicyModel{ + Body: types.StringPointerValue(policy.Body), + ID: types.StringPointerValue(policy.Id), + } + } + return &resourcepolicy.TFModel{ + CreatedByUser: unit.TFObjectValue(t, resourcepolicy.UserMetadataObjectType, resourcepolicy.TFUserMetadataModel{ + ID: types.StringValue(testCase.userIDCreate), + Name: types.StringValue(testCase.userNameCreate), + }), + LastUpdatedByUser: unit.TFObjectValue(t, resourcepolicy.UserMetadataObjectType, resourcepolicy.TFUserMetadataModel{ + ID: types.StringValue(testCase.userIDUpdate), + Name: types.StringValue(testCase.userNameUpdate), + }), + Policies: policies, + CreatedDate: types.StringValue(testCase.createdDate), + ID: types.StringValue(testCase.policyID), + LastUpdatedDate: types.StringValue(testCase.lastUpdatedDate), + Name: types.StringValue(testCase.name), + OrgID: types.StringValue(testCase.orgID), + Version: types.StringValue(testCase.version), + } +} + +func TestNewTFModel(t *testing.T) { + testCases := map[string]tfModelTestCase{ + "clusterForbidCloudProvider": { + name: "clusterForbidCloudProvider", + SDKRespJSON: clusterForbidCloudProviderJSON, + userIDUpdate: "65def6f00f722a1507105ad9", + userNameUpdate: "updateUser", + }, + "policyMultipleEntriesJSON": { + SDKRespJSON: policyMultipleEntriesJSON, + name: "multipleEntries", + createdDate: "2024-09-11T13:36:18Z", + lastUpdatedDate: "2024-09-11T13:36:18Z", + policyID: "66e19cd2fdc0332d1fa5e877", + }, + } + + for testName, tc := range testCases { + t.Run(testName, func(t *testing.T) { + SDKModel := parseSDKModel(t, tc.SDKRespJSON) + ctx := context.Background() + expectedModel := createTFModel(t, &tc) + resultModel, diags := resourcepolicy.NewTFModel(ctx, &SDKModel) + unit.AssertDiagsOK(t, diags) + assert.Equal(t, expectedModel, resultModel) + }) + } +} + +func TestNewUserMetadataObjectTypeWithNilArg(t *testing.T) { + ctx := context.Background() + var metadataNil *admin.ApiAtlasUserMetadata + diags := diag.Diagnostics{} + obj := resourcepolicy.NewUserMetadataObjectType(ctx, metadataNil, &diags) + unit.AssertDiagsOK(t, diags) + assert.Equal(t, types.ObjectNull(resourcepolicy.UserMetadataObjectType.AttrTypes), obj) +} + +func TestNewAdminPolicies(t *testing.T) { + ctx := context.Background() + policies := []resourcepolicy.TFPolicyModel{ + { + Body: types.StringValue("policy1"), + ID: types.StringValue("id1"), + }, + { + Body: types.StringValue("policy2"), + }, + } + apiModels := resourcepolicy.NewAdminPolicies(ctx, policies) + assert.Len(t, apiModels, 2) + assert.Equal(t, "policy1", apiModels[0].GetBody()) + assert.Equal(t, "policy2", apiModels[1].GetBody()) +} + +func TestNewTFModelDSP(t *testing.T) { + ctx := context.Background() + orgID := "65def6ce0f722a1507105aa5" + input := []admin.ApiAtlasResourcePolicy{ + parseSDKModel(t, clusterForbidCloudProviderJSON), + parseSDKModel(t, policyMultipleEntriesJSON), + } + resultModel, diags := 
resourcepolicy.NewTFModelDSP(ctx, orgID, input) + unit.AssertDiagsOK(t, diags) + assert.Len(t, resultModel.ResourcePolicies, 2) + + assert.Equal(t, orgID, resultModel.OrgID.ValueString()) +} + +func TestNewTFModelDSPEmptyModel(t *testing.T) { + ctx := context.Background() + orgID := "65def6ce0f722a1507105aa5" + resultModel, diags := resourcepolicy.NewTFModelDSP(ctx, orgID, []admin.ApiAtlasResourcePolicy{}) + unit.AssertDiagsOK(t, diags) + assert.Empty(t, resultModel.ResourcePolicies) + assert.Equal(t, orgID, resultModel.OrgID.ValueString()) +} diff --git a/internal/service/resourcepolicy/resource.go b/internal/service/resourcepolicy/resource.go new file mode 100644 index 0000000000..3dbe1a7819 --- /dev/null +++ b/internal/service/resourcepolicy/resource.go @@ -0,0 +1,190 @@ +package resourcepolicy + +import ( + "context" + "errors" + "fmt" + "net/http" + "regexp" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" + "go.mongodb.org/atlas-sdk/v20240805004/admin" +) + +var _ resource.ResourceWithConfigure = &resourcePolicyRS{} +var _ resource.ResourceWithImportState = &resourcePolicyRS{} +var _ resource.ResourceWithModifyPlan = &resourcePolicyRS{} + +const ( + resourceName = "resource_policy" + fullResourceName = "mongodbatlas_" + resourceName + errorCreate = "error creating resource " + fullResourceName + errorRead = "error reading resource " + fullResourceName + errorUpdate = "error updating resource " + fullResourceName +) + +func Resource() resource.Resource { + return &resourcePolicyRS{ + RSCommon: config.RSCommon{ + ResourceName: resourceName, + }, + } +} + +type resourcePolicyRS struct { + config.RSCommon +} + +func (r *resourcePolicyRS) ModifyPlan(ctx context.Context, req resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { + var policies []TFPolicyModel + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("policies"), &policies)...) + sdkPolicies := NewAdminPolicies(ctx, policies) + var orgID, name *string + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("org_id"), &orgID)...) + resp.Diagnostics.Append(req.Plan.GetAttribute(ctx, path.Root("name"), &name)...) + if resp.Diagnostics.HasError() { + return + } + if name == nil || orgID == nil { + return + } + sdkCreate := &admin.ApiAtlasResourcePolicyCreate{ + Name: *name, + Policies: sdkPolicies, + } + connV2 := r.Client.AtlasV2 + _, _, err := connV2.ResourcePoliciesApi.ValidateAtlasResourcePolicy(ctx, *orgID, sdkCreate).Execute() + if err != nil { + conversion.AddJSONBodyErrorToDiagnostics(fmt.Sprintf("Policy Validation failed (name=%s): ", *name), err, &resp.Diagnostics) + } +} + +func (r *resourcePolicyRS) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = ResourceSchema(ctx) +} + +func (r *resourcePolicyRS) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan TFModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + orgID := plan.OrgID.ValueString() + policies := NewAdminPolicies(ctx, plan.Policies) + + connV2 := r.Client.AtlasV2 + policySDK, _, err := connV2.ResourcePoliciesApi.CreateAtlasResourcePolicy(ctx, orgID, &admin.ApiAtlasResourcePolicyCreate{ + Name: plan.Name.ValueString(), + Policies: policies, + }).Execute() + if err != nil { + resp.Diagnostics.AddError(errorCreate, err.Error()) + return + } + newResourcePolicyModel, diags := NewTFModel(ctx, policySDK) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newResourcePolicyModel)...) +} + +func (r *resourcePolicyRS) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state TFModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + orgID := state.OrgID.ValueString() + resourcePolicyID := state.ID.ValueString() + connV2 := r.Client.AtlasV2 + policySDK, apiResp, err := connV2.ResourcePoliciesApi.GetAtlasResourcePolicy(ctx, orgID, resourcePolicyID).Execute() + + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + resp.State.RemoveResource(ctx) + return + } + resp.Diagnostics.AddError(errorRead, err.Error()) + return + } + + newResourcePolicyModel, diags := NewTFModel(ctx, policySDK) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newResourcePolicyModel)...) +} + +func (r *resourcePolicyRS) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan TFModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + orgID := plan.OrgID.ValueString() + resourcePolicyID := plan.ID.ValueString() + connV2 := r.Client.AtlasV2 + policies := NewAdminPolicies(ctx, plan.Policies) + editAdmin := admin.ApiAtlasResourcePolicyEdit{ + Name: plan.Name.ValueStringPointer(), + Policies: &policies, + } + policySDK, _, err := connV2.ResourcePoliciesApi.UpdateAtlasResourcePolicy(ctx, orgID, resourcePolicyID, &editAdmin).Execute() + + if err != nil { + resp.Diagnostics.AddError(errorUpdate, err.Error()) + return + } + newResourcePolicyModel, diags := NewTFModel(ctx, policySDK) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newResourcePolicyModel)...) +} + +func (r *resourcePolicyRS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var resourcePolicyState *TFModel + resp.Diagnostics.Append(req.State.Get(ctx, &resourcePolicyState)...) + if resp.Diagnostics.HasError() { + return + } + orgID := resourcePolicyState.OrgID.ValueString() + resourcePolicyID := resourcePolicyState.ID.ValueString() + connV2 := r.Client.AtlasV2 + resourcePolicyAPI := connV2.ResourcePoliciesApi + if _, _, err := resourcePolicyAPI.DeleteAtlasResourcePolicy(ctx, orgID, resourcePolicyID).Execute(); err != nil { + resp.Diagnostics.AddError("error deleting resource", err.Error()) + return + } +} + +func (r *resourcePolicyRS) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + orgID, resourcePolicyID, err := splitImportID(req.ID) + if err != nil { + resp.Diagnostics.AddError("error splitting resource policy import ID", err.Error()) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("org_id"), orgID)...)
+ resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), resourcePolicyID)...) +} + +func splitImportID(id string) (orgID, resourcePolicyID string, err error) { + var re = regexp.MustCompile(`(?s)^([0-9a-fA-F]{24})-(.*)$`) + parts := re.FindStringSubmatch(id) + + if len(parts) != 3 { + err = errors.New("use the format {org_id}-{resource_policy_id}") + return + } + + orgID = parts[1] + resourcePolicyID = parts[2] + return +} diff --git a/internal/service/resourcepolicy/resource_migration_test.go b/internal/service/resourcepolicy/resource_migration_test.go new file mode 100644 index 0000000000..4ae76015c5 --- /dev/null +++ b/internal/service/resourcepolicy/resource_migration_test.go @@ -0,0 +1,12 @@ +package resourcepolicy_test + +import ( + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/mig" +) + +func TestMigResourcePolicy_basic(t *testing.T) { + mig.SkipIfVersionBelow(t, "1.21.0") // this feature was introduced in provider version 1.21.0 + mig.CreateAndRunTestNonParallel(t, basicTestCase(t)) +} diff --git a/internal/service/resourcepolicy/resource_schema.go b/internal/service/resourcepolicy/resource_schema.go new file mode 100644 index 0000000000..3aa68467d4 --- /dev/null +++ b/internal/service/resourcepolicy/resource_schema.go @@ -0,0 +1,134 @@ +package resourcepolicy + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ResourceSchema(ctx context.Context) schema.Schema { + return schema.Schema{ + Attributes: map[string]schema.Attribute{ + "created_by_user": schema.SingleNestedAttribute{ + Description: "The user that last updated the Atlas resource policy.", + MarkdownDescription: "The user that last updated the Atlas resource policy.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal character string that identifies a user.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies a user.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Human-readable label that describes a user.", + MarkdownDescription: "Human-readable label that describes a user.", + Computed: true, + }, + }, + }, + "created_date": schema.StringAttribute{ + Description: "Date and time in UTC when the Atlas resource policy was created.", + MarkdownDescription: "Date and time in UTC when the Atlas resource policy was created.", + Computed: true, + }, + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal digit string that identifies an Atlas resource policy.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies an Atlas resource policy.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "last_updated_by_user": schema.SingleNestedAttribute{ + Description: "The user that last updated the Atlas resource policy.", + MarkdownDescription: "The user that last updated the Atlas resource policy.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal character string that identifies a user.", + MarkdownDescription: "Unique 24-hexadecimal 
character string that identifies a user.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Human-readable label that describes a user.", + MarkdownDescription: "Human-readable label that describes a user.", + Computed: true, + }, + }, + }, + "last_updated_date": schema.StringAttribute{ + Description: "Date and time in UTC when the Atlas resource policy was last updated.", + MarkdownDescription: "Date and time in UTC when the Atlas resource policy was last updated.", + Computed: true, + }, + "name": schema.StringAttribute{ + Description: "Human-readable label that describes the Atlas resource policy.", + MarkdownDescription: "Human-readable label that describes the Atlas resource policy.", + Required: true, + }, + "org_id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access.", + MarkdownDescription: "Unique 24-hexadecimal digit string that identifies the organization that contains your projects. Use the [/orgs](#tag/Organizations/operation/listOrganizations) endpoint to retrieve all organizations to which the authenticated user has access.", + Required: true, + }, + "policies": schema.ListNestedAttribute{ + Description: "List of policies that make up the Atlas resource policy.", + MarkdownDescription: "List of policies that make up the Atlas resource policy.", + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "body": schema.StringAttribute{ + Description: "A string that defines the permissions for the policy. The syntax used is the Cedar Policy language.", + MarkdownDescription: "A string that defines the permissions for the policy. 
The syntax used is the Cedar Policy language.", + Required: true, + }, + "id": schema.StringAttribute{ + Description: "Unique 24-hexadecimal character string that identifies the policy.", + MarkdownDescription: "Unique 24-hexadecimal character string that identifies the policy.", + Computed: true, + }, + }, + }, + }, + "version": schema.StringAttribute{ + Description: "A string that identifies the version of the Atlas resource policy.", + MarkdownDescription: "A string that identifies the version of the Atlas resource policy.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +type TFModel struct { + CreatedByUser types.Object `tfsdk:"created_by_user"` + CreatedDate types.String `tfsdk:"created_date"` + ID types.String `tfsdk:"id"` + LastUpdatedByUser types.Object `tfsdk:"last_updated_by_user"` + LastUpdatedDate types.String `tfsdk:"last_updated_date"` + Name types.String `tfsdk:"name"` + OrgID types.String `tfsdk:"org_id"` + Version types.String `tfsdk:"version"` + Policies []TFPolicyModel `tfsdk:"policies"` +} + +type TFUserMetadataModel struct { + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` +} + +var UserMetadataObjectType = types.ObjectType{AttrTypes: map[string]attr.Type{ + "id": types.StringType, + "name": types.StringType, +}} + +type TFPolicyModel struct { + Body types.String `tfsdk:"body"` + ID types.String `tfsdk:"id"` +} diff --git a/internal/service/resourcepolicy/resource_test.go b/internal/service/resourcepolicy/resource_test.go new file mode 100644 index 0000000000..b3f2a1b19e --- /dev/null +++ b/internal/service/resourcepolicy/resource_test.go @@ -0,0 +1,247 @@ +package resourcepolicy_test + +import ( + "context" + "fmt" + "os" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" +) + +var ( + resourceType = "mongodbatlas_resource_policy" + resourceID = fmt.Sprintf("%s.test", resourceType) + dataSourceID = "data.mongodbatlas_resource_policy.test" + dataSourcePluralID = "data.mongodbatlas_resource_policies.test" + invalidPolicyUnknownCloudProvider = ` + forbid ( + principal, + action == cloud::Action::"cluster.createEdit", + resource + ) when { + context.cluster.cloudProviders.containsAny([cloud::cloudProvider::"aws222"]) + };` + invalidPolicyMissingComma = ` + forbid ( + principal, + action == cloud::Action::"cluster.createEdit" + resource + ) when { + context.cluster.cloudProviders.containsAny([cloud::cloudProvider::"aws"]) + };` + validPolicyForbidAwsCloudProvider = ` + forbid ( + principal, + action == cloud::Action::"cluster.createEdit", + resource + ) when { + context.cluster.cloudProviders.containsAny([cloud::cloudProvider::"aws"]) + };` + validPolicyProjectForbidIPAccessAnywhere = ` + forbid ( + principal, + action == cloud::Action::"project.edit", + resource + ) + when { + context.project.ipAccessList.contains(ip("0.0.0.0/0")) + };` +) + +func TestAccResourcePolicy_basic(t *testing.T) { + tc := basicTestCase(t) + resource.Test(t, *tc) +} + +func basicTestCase(t *testing.T) *resource.TestCase { + t.Helper() + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + policyName = "test-policy" + updatedName = "updated-policy" + ) + return &resource.TestCase{ // Need sequential execution for assertions to be deterministic (plural data source) + PreCheck: func() { acc.PreCheckBasic(t) }, + 
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: checkDestroy, + Steps: []resource.TestStep{ + { + Config: configBasic(orgID, policyName), + Check: checksResourcePolicy(orgID, policyName, 1), + }, + { + Config: configBasic(orgID, updatedName), + Check: checksResourcePolicy(orgID, updatedName, 1), + }, + { + Config: configBasic(orgID, updatedName), + ResourceName: resourceID, + ImportStateIdFunc: checkImportStateIDFunc(resourceID), + ImportState: true, + ImportStateVerify: true, + }, + }, + } +} + +func TestAccResourcePolicy_multipleNestedPolicies(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + ) + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: checkDestroy, + Steps: []resource.TestStep{ + { + Config: configWithPolicyBodies(orgID, "test-policy-multiple", validPolicyForbidAwsCloudProvider, validPolicyProjectForbidIPAccessAnywhere), + Check: checksResourcePolicy(orgID, "test-policy-multiple", 2), + }, + { + Config: configWithPolicyBodies(orgID, "test-policy-multiple", validPolicyForbidAwsCloudProvider, validPolicyProjectForbidIPAccessAnywhere), + ResourceName: resourceID, + ImportStateIdFunc: checkImportStateIDFunc(resourceID), + ImportState: true, + ImportStateVerify: true, + }, + }, + }, + ) +} + +func TestAccResourcePolicy_invalidConfig(t *testing.T) { + var ( + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + policyName = "test-policy-invalid" + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckBasic(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + CheckDestroy: checkDestroy, + Steps: []resource.TestStep{ + { + Config: configWithPolicyBodies(orgID, policyName, invalidPolicyMissingComma), + ExpectError: regexp.MustCompile("unexpected token `resource`"), + }, + { + Config: configWithPolicyBodies(orgID, policyName, invalidPolicyUnknownCloudProvider), + ExpectError: regexp.MustCompile(`entity id aws222 does not exist in the context of this organization`), + }, + { + Config: configWithPolicyBodies(orgID, policyName, validPolicyForbidAwsCloudProvider, invalidPolicyUnknownCloudProvider), + ExpectError: regexp.MustCompile(`entity id aws222 does not exist in the context of this organization`), + }, + }, + }, + ) +} + +func checksResourcePolicy(orgID, name string, policyCount int) resource.TestCheckFunc { + attrMap := map[string]string{ + "org_id": orgID, + "policies.#": fmt.Sprintf("%d", policyCount), + "name": name, + } + attrSet := []string{ + "created_by_user.id", + "created_by_user.name", + "created_date", + "last_updated_by_user.id", + "last_updated_by_user.name", + "last_updated_date", + "id", + "version", + } + pluralMap := map[string]string{ + "org_id": orgID, + "resource_policies.#": "1", + } + checks := []resource.TestCheckFunc{checkExists()} + checks = acc.AddAttrChecks(dataSourcePluralID, checks, pluralMap) + for i := 0; i < policyCount; i++ { + checks = acc.AddAttrSetChecks(resourceID, checks, fmt.Sprintf("policies.%d.body", i), fmt.Sprintf("policies.%d.id", i)) + checks = acc.AddAttrSetChecks(dataSourceID, checks, fmt.Sprintf("policies.%d.body", i), fmt.Sprintf("policies.%d.id", i)) + checks = acc.AddAttrSetChecks(dataSourcePluralID, checks, fmt.Sprintf("resource_policies.0.policies.%d.body", i), fmt.Sprintf("resource_policies.0.policies.%d.id", i)) + } + // cannot use dataSourcePluralID as it doesn't have the `results` attribute + return acc.CheckRSAndDS(resourceID, &dataSourceID, 
nil, attrSet, attrMap, resource.ComposeAggregateTestCheckFunc(checks...)) +} + +func configBasic(orgID, policyName string) string { + return configWithPolicyBodies(orgID, policyName, validPolicyForbidAwsCloudProvider) +} + +func configWithPolicyBodies(orgID, policyName string, bodies ...string) string { + policies := "" + for _, body := range bodies { + policies += fmt.Sprintf(` + { + body = < 0 { + _, _, err = teamsAPI.AddTeamUser(context.Background(), orgID, teamID, &userToAddModels).Execute() + if err != nil { + return err + } + } + + for i := range usersToRemove { + // remove user from team + _, err := teamsAPI.RemoveTeamUser(context.Background(), orgID, teamID, usersToRemove[i]).Execute() + if err != nil { + return err + } + } + + return nil +} + +func ValidateUsernames(c admin.MongoDBCloudUsersApi, usernames []string) ([]admin.CloudAppUser, error) { + var validUsers []admin.CloudAppUser + for _, elem := range usernames { + userToAdd, _, err := c.GetUserByUsername(context.Background(), elem).Execute() + if err != nil { + return nil, err + } + validUsers = append(validUsers, *userToAdd) + } + return validUsers, nil +} + +func GetChangesForTeamUsers(currentUsers, newUsers []admin.CloudAppUser) (toAdd, toDelete []string, err error) { + // Create two sets to store the elements of current and new users + currentUsersSet := InitUserSet(currentUsers) + newUsersSet := InitUserSet(newUsers) + + // Iterate over new users and add them to the toAdd array if they are not in current users + for elem := range newUsersSet { + if !currentUsersSet[elem] { + toAdd = append(toAdd, elem) + } + } + + // Iterate over current users and add them to the toDelete array if they are not in new users + for elem := range currentUsersSet { + if !newUsersSet[elem] { + toDelete = append(toDelete, elem) + } + } + + return toAdd, toDelete, nil +} + +func InitUserSet(users []admin.CloudAppUser) map[string]bool { + usersSet := make(map[string]bool, len(users)) + for i := 0; i < len(users); i++ { + usersSet[users[i].GetId()] = true + } + return usersSet +} diff --git a/internal/service/team/update_user_test.go b/internal/service/team/update_user_test.go new file mode 100644 index 0000000000..fe1ef6a3d4 --- /dev/null +++ b/internal/service/team/update_user_test.go @@ -0,0 +1,159 @@ +package team_test + +import ( + "errors" + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/team" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20240805004/admin" + "go.mongodb.org/atlas-sdk/v20240805004/mockadmin" +) + +func TestGetChangesForTeamUsers(t *testing.T) { + user1 := "user1" + user2 := "user2" + user3 := "user3" + + testCases := map[string]struct { + testName string + currentUsers []admin.CloudAppUser + newUsers []admin.CloudAppUser + expectedToAdd []string + expectedToDelete []string + }{ + "succeeds adding a new user and removing an existing one": { + currentUsers: []admin.CloudAppUser{ + {Id: &user1}, + {Id: &user2}, + }, + newUsers: []admin.CloudAppUser{ + {Id: &user1}, + {Id: &user3}, + }, + expectedToAdd: []string{user3}, + expectedToDelete: []string{user2}, + }, + "succeeds adding all users": { + currentUsers: []admin.CloudAppUser{}, + newUsers: []admin.CloudAppUser{ + {Id: &user1}, + {Id: &user2}, + }, + expectedToAdd: []string{user1, user2}, + expectedToDelete: []string{}, + }, + "succeeds removing both users": { + currentUsers: []admin.CloudAppUser{ + {Id: &user1}, + {Id: &user2}, + }, + 
newUsers: []admin.CloudAppUser{}, + expectedToAdd: []string{}, + expectedToDelete: []string{user1, user2}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.testName, func(t *testing.T) { + toAdd, toDelete, err := team.GetChangesForTeamUsers(testCase.currentUsers, testCase.newUsers) + require.NoError(t, err) + assert.ElementsMatch(t, testCase.expectedToAdd, toAdd) + assert.ElementsMatch(t, testCase.expectedToDelete, toDelete) + }) + } +} + +func TestUpdateTeamUsers(t *testing.T) { + validuser1 := "validuser1" + validuser2 := "validuser2" + invaliduser1 := "invaliduser1" + + testCases := map[string]struct { + mockFuncExpectations func(*mockadmin.TeamsApi, *mockadmin.MongoDBCloudUsersApi) + existingTeamUsers *admin.PaginatedApiAppUser + expectError require.ErrorAssertionFunc + testName string + usernames []string + }{ + "succeeds but no changes are required": { + mockFuncExpectations: func(mockTeamsApi *mockadmin.TeamsApi, mockUsersApi *mockadmin.MongoDBCloudUsersApi) { + mockValidUser1 := mockadmin.NewMongoDBCloudUsersApi(t) + mockValidUser2 := mockadmin.NewMongoDBCloudUsersApi(t) + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, validuser1).Return(admin.GetUserByUsernameApiRequest{ApiService: mockValidUser1}) + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, validuser2).Return(admin.GetUserByUsernameApiRequest{ApiService: mockValidUser2}) + mockValidUser1.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(&admin.CloudAppUser{Id: &validuser1}, nil, nil) + mockValidUser2.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(&admin.CloudAppUser{Id: &validuser2}, nil, nil) + }, + existingTeamUsers: &admin.PaginatedApiAppUser{Results: &[]admin.CloudAppUser{{Id: &validuser1}, {Id: &validuser2}}}, + usernames: []string{validuser1, validuser2}, + expectError: require.NoError, + }, + "fails because one user is invalid": { + mockFuncExpectations: func(mockTeamsApi *mockadmin.TeamsApi, mockUsersApi *mockadmin.MongoDBCloudUsersApi) { + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, invaliduser1).Return(admin.GetUserByUsernameApiRequest{ApiService: mockUsersApi}) + mockUsersApi.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(nil, nil, errors.New("invalid username")) + }, + existingTeamUsers: nil, + usernames: []string{invaliduser1}, + expectError: require.Error, + }, + "succeeds with one user to be added": { + mockFuncExpectations: func(mockTeamsApi *mockadmin.TeamsApi, mockUsersApi *mockadmin.MongoDBCloudUsersApi) { + mockValidUser1 := mockadmin.NewMongoDBCloudUsersApi(t) + mockValidUser2 := mockadmin.NewMongoDBCloudUsersApi(t) + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, validuser1).Return(admin.GetUserByUsernameApiRequest{ApiService: mockValidUser1}) + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, validuser2).Return(admin.GetUserByUsernameApiRequest{ApiService: mockValidUser2}) + mockValidUser1.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(&admin.CloudAppUser{Id: &validuser1}, nil, nil) + mockValidUser2.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(&admin.CloudAppUser{Id: &validuser2}, nil, nil) + + mockTeamsApi.EXPECT().AddTeamUser(mock.Anything, mock.Anything, mock.Anything, &[]admin.AddUserToTeam{{Id: validuser2}}).Return(admin.AddTeamUserApiRequest{ApiService: mockTeamsApi}) + mockTeamsApi.EXPECT().AddTeamUserExecute(mock.Anything).Return(nil, nil, nil) + }, + existingTeamUsers: &admin.PaginatedApiAppUser{Results: &[]admin.CloudAppUser{{Id: &validuser1}}}, + usernames: []string{validuser1, validuser2}, 
+ expectError: require.NoError, + }, + "succeeds with one user to be removed": { + mockFuncExpectations: func(mockTeamsApi *mockadmin.TeamsApi, mockUsersApi *mockadmin.MongoDBCloudUsersApi) { + mockValidUser2 := mockadmin.NewMongoDBCloudUsersApi(t) + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, validuser2).Return(admin.GetUserByUsernameApiRequest{ApiService: mockValidUser2}) + mockValidUser2.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(&admin.CloudAppUser{Id: &validuser2}, nil, nil) + + mockTeamsApi.EXPECT().RemoveTeamUser(mock.Anything, mock.Anything, mock.Anything, validuser1).Return(admin.RemoveTeamUserApiRequest{ApiService: mockTeamsApi}) + mockTeamsApi.EXPECT().RemoveTeamUserExecute(mock.Anything).Return(nil, nil) + }, + existingTeamUsers: &admin.PaginatedApiAppUser{Results: &[]admin.CloudAppUser{{Id: &validuser1}, {Id: &validuser2}}}, + usernames: []string{validuser2}, + expectError: require.NoError, + }, + "succeeds with one user to be added and the other removed": { + mockFuncExpectations: func(mockTeamsApi *mockadmin.TeamsApi, mockUsersApi *mockadmin.MongoDBCloudUsersApi) { + mockValidUser2 := mockadmin.NewMongoDBCloudUsersApi(t) + mockUsersApi.EXPECT().GetUserByUsername(mock.Anything, validuser1).Return(admin.GetUserByUsernameApiRequest{ApiService: mockValidUser2}) + mockValidUser2.EXPECT().GetUserByUsernameExecute(mock.Anything).Return(&admin.CloudAppUser{Id: &validuser1}, nil, nil) + + addCall := mockTeamsApi.EXPECT().AddTeamUser(mock.Anything, mock.Anything, mock.Anything, &[]admin.AddUserToTeam{{Id: validuser1}}).Return(admin.AddTeamUserApiRequest{ApiService: mockTeamsApi}) + mockTeamsApi.EXPECT().AddTeamUserExecute(mock.Anything).Return(nil, nil, nil) + + removeCall := mockTeamsApi.EXPECT().RemoveTeamUser(mock.Anything, mock.Anything, mock.Anything, validuser2).Return(admin.RemoveTeamUserApiRequest{ApiService: mockTeamsApi}) + removeCall.NotBefore(addCall.Call) // Ensures new additions are made before removing + mockTeamsApi.EXPECT().RemoveTeamUserExecute(mock.Anything).Return(nil, nil) + }, + existingTeamUsers: &admin.PaginatedApiAppUser{Results: &[]admin.CloudAppUser{{Id: &validuser2}}}, + usernames: []string{validuser1}, + expectError: require.NoError, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.testName, func(t *testing.T) { + mockTeamsAPI := mockadmin.NewTeamsApi(t) + mockUsersAPI := mockadmin.NewMongoDBCloudUsersApi(t) + testCase.mockFuncExpectations(mockTeamsAPI, mockUsersAPI) + testCase.expectError(t, team.UpdateTeamUsers(mockTeamsAPI, mockUsersAPI, testCase.existingTeamUsers, testCase.usernames, "orgID", "teamID")) + }) + } +} diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 74e9dedff2..336962cf80 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -15,6 +15,16 @@ func PreCheckBasic(tb testing.TB) { } } +// PreCheckBasicSleep is a helper function to call SerialSleep, see its help for more info. +// Some examples of use are when the test is calling ProjectIDExecution or GetClusterInfo to create clusters. +func PreCheckBasicSleep(tb testing.TB) func() { + tb.Helper() + return func() { + PreCheckBasic(tb) + SerialSleep(tb) + } +} + // PreCheck checks common Atlas environment variables and MONGODB_ATLAS_PROJECT_ID. // Deprecated: it should not be used as MONGODB_ATLAS_PROJECT_ID is not intended to be used in CI. // Use PreCheckBasic instead. 
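Illustration only, not part of this changeset: a minimal sketch of how the `PreCheckBasicSleep` helper added above might be wired into an acceptance test. The test name and Terraform config are placeholders; `resource.ParallelTest`, `acc.TestAccProviderV6Factories`, and `acc.PreCheckBasicSleep` follow the patterns already shown in this diff.

```go
package example_test

import (
	"os"
	"testing"

	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
)

// Hypothetical test: PreCheckBasicSleep runs the usual basic pre-checks and then
// SerialSleep, so the first cluster created in a shared project is not created
// concurrently with clusters from other test packages (HELP-65223).
func TestAccExample_basic(t *testing.T) {
	config := `
resource "mongodbatlas_project" "test" {
  org_id = "` + os.Getenv("MONGODB_ATLAS_ORG_ID") + `"
  name   = "example-project"
}`
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 acc.PreCheckBasicSleep(t), // returns func(): PreCheckBasic + SerialSleep
		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
		Steps:                    []resource.TestStep{{Config: config}},
	})
}
```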
@@ -82,6 +92,14 @@ func PreCheckAtlasUsername(tb testing.TB) { } } +func PreCheckAtlasUsernames(tb testing.TB) { + tb.Helper() + PreCheckAtlasUsername(tb) + if os.Getenv("MONGODB_ATLAS_USERNAME_2") == "" { + tb.Fatal("`MONGODB_ATLAS_USERNAME_2` must be set") + } +} + func PreCheckProjectTeamsIDsWithMinCount(tb testing.TB, minTeamsCount int) { tb.Helper() envVar := os.Getenv("MONGODB_ATLAS_TEAMS_IDS") diff --git a/internal/testutil/acc/shared_resource.go b/internal/testutil/acc/shared_resource.go index f50ddba133..3c4f42ed41 100644 --- a/internal/testutil/acc/shared_resource.go +++ b/internal/testutil/acc/shared_resource.go @@ -4,6 +4,7 @@ import ( "fmt" "sync" "testing" + "time" "github.com/stretchr/testify/require" ) @@ -84,10 +85,29 @@ func ClusterNameExecution(tb testing.TB) (projectID, clusterName string) { return sharedInfo.projectID, sharedInfo.clusterName } +// SerialSleep waits a few seconds the first time so the first cluster in a project is not created concurrently, see HELP-65223. +// This must be called once the test is marked as parallel, e.g. in PreCheck inside Terraform tests. +func SerialSleep(tb testing.TB) { + tb.Helper() + SkipInUnitTest(tb) + require.True(tb, sharedInfo.init, "SetupSharedResources must be called from TestMain in the test package") + + sharedInfo.muSleep.Lock() + defer sharedInfo.muSleep.Unlock() + + if sharedInfo.alreadySlept { + return + } + time.Sleep(5 * time.Second) + sharedInfo.alreadySlept = true +} + var sharedInfo = struct { - projectID string - projectName string - clusterName string - mu sync.Mutex - init bool + projectID string + projectName string + clusterName string + mu sync.Mutex + muSleep sync.Mutex + alreadySlept bool + init bool }{} diff --git a/internal/testutil/mig/pre_check.go b/internal/testutil/mig/pre_check.go index ebd1233b18..acfc144728 100644 --- a/internal/testutil/mig/pre_check.go +++ b/internal/testutil/mig/pre_check.go @@ -12,6 +12,16 @@ func PreCheckBasic(tb testing.TB) { acc.PreCheckBasic(tb) } +// PreCheckBasicSleep is a helper function to call SerialSleep, see its help for more info. +// Some examples of use are when the test is calling ProjectIDExecution or GetClusterInfo to create clusters. 
+func PreCheckBasicSleep(tb testing.TB) func() { + tb.Helper() + return func() { + PreCheckBasic(tb) + acc.SerialSleep(tb) + } +} + func PreCheck(tb testing.TB) { tb.Helper() checkLastVersion(tb) diff --git a/internal/testutil/unit/fw_model.go b/internal/testutil/unit/fw_model.go new file mode 100644 index 0000000000..53af4ea84e --- /dev/null +++ b/internal/testutil/unit/fw_model.go @@ -0,0 +1,32 @@ +package unit + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func TFObjectValue(t *testing.T, objType types.ObjectType, attributes any) types.Object { + t.Helper() + ctx := context.Background() + object, diags := types.ObjectValueFrom(ctx, objType.AttrTypes, attributes) + AssertDiagsOK(t, diags) + return object +} + +func TFListValue(t *testing.T, elementType types.ObjectType, tfList any) types.List { + t.Helper() + ctx := context.Background() + list, diags := types.ListValueFrom(ctx, elementType, tfList) + AssertDiagsOK(t, diags) + return list +} + +func AssertDiagsOK(t *testing.T, diags diag.Diagnostics) { + t.Helper() + if diags.HasError() { + t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary()) + } +} diff --git a/scripts/generate-doc.sh b/scripts/generate-doc.sh index ed9f8926c4..09d47aa198 100755 --- a/scripts/generate-doc.sh +++ b/scripts/generate-doc.sh @@ -32,7 +32,7 @@ set -euo pipefail -TF_VERSION="${TF_VERSION:-"1.9.2"}" # TF version to use when running tfplugindocs. Default: 1.9.2 +TF_VERSION="${TF_VERSION:-"1.9.7"}" # TF version to use when running tfplugindocs. Default: 1.9.7 TEMPLATE_FOLDER_PATH="${TEMPLATE_FOLDER_PATH:-"templates"}" # PATH to the templates folder. Default: templates diff --git a/scripts/generate-docs-all.sh b/scripts/generate-docs-all.sh index 15251d6099..211e0593fb 100755 --- a/scripts/generate-docs-all.sh +++ b/scripts/generate-docs-all.sh @@ -26,7 +26,7 @@ set -euo pipefail -TF_VERSION="${TF_VERSION:-"1.9.2"}" # TF version to use when running tfplugindocs. Default: 1.9.2 +TF_VERSION="${TF_VERSION:-"1.9.7"}" # TF version to use when running tfplugindocs. Default: 1.9.7 TEMPLATE_FOLDER_PATH="${TEMPLATE_FOLDER_PATH:-"templates"}" # PATH to the templates folder. Default: templates # ensure preview resource and data sources are also included during generation diff --git a/scripts/update-sdk.sh b/scripts/update-sdk.sh index b31a10caf4..c3f1c6baad 100755 --- a/scripts/update-sdk.sh +++ b/scripts/update-sdk.sh @@ -16,10 +16,20 @@ set -euo pipefail +CURRENT_SDK_RELEASE=$(grep 'go.mongodb.org/atlas-sdk/v' go.mod | + awk '{print $1}' | + sed 's/go.mongodb.org\/atlas-sdk\///' | + sort -V | + tail -n 1) +echo "CURRENT_SDK_RELEASE: $CURRENT_SDK_RELEASE" + LATEST_SDK_TAG=$(curl -sSfL -X GET https://api.github.com/repos/mongodb/atlas-sdk-go/releases/latest | jq -r '.tag_name') +echo "LATEST_SDK_TAG: $LATEST_SDK_TAG" LATEST_SDK_RELEASE=$(echo "${LATEST_SDK_TAG}" | cut -d '.' 
-f 1) -echo "==> Updating SDK to latest major version ${LATEST_SDK_TAG}" -gomajor get "go.mongodb.org/atlas-sdk/${LATEST_SDK_RELEASE}@${LATEST_SDK_TAG}" +echo "LATEST_SDK_RELEASE: $LATEST_SDK_RELEASE" +echo "==> Updating SDK ${CURRENT_SDK_RELEASE} to latest major version ${LATEST_SDK_TAG}" + +gomajor get --rewrite "go.mongodb.org/atlas-sdk/${CURRENT_SDK_RELEASE}" "go.mongodb.org/atlas-sdk/${LATEST_SDK_RELEASE}@${LATEST_SDK_TAG}" go mod tidy echo "Done" diff --git a/templates/data-sources/resource_policies.md.tmpl b/templates/data-sources/resource_policies.md.tmpl new file mode 100644 index 0000000000..c27421881e --- /dev/null +++ b/templates/data-sources/resource_policies.md.tmpl @@ -0,0 +1,13 @@ +# {{.Type}}: {{.Name}} + +`{{.Name}}` returns all resource policies in an organization. + +-> **NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](https://github.com/mongodb/terraform-provider-mongodbatlas?tab=readme-ov-file#preview-features) when running `terraform` commands. + +## Example Usages +{{ tffile (printf "examples/mongodbatlas_resource_policy/main.tf" )}} + +{{ .SchemaMarkdown | trimspace }} + diff --git a/templates/data-sources/resource_policy.md.tmpl b/templates/data-sources/resource_policy.md.tmpl new file mode 100644 index 0000000000..bbb0f96896 --- /dev/null +++ b/templates/data-sources/resource_policy.md.tmpl @@ -0,0 +1,13 @@ +# {{.Type}}: {{.Name}} + +`{{.Name}}` describes a resource policy in an organization. + +-> **NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](https://github.com/mongodb/terraform-provider-mongodbatlas?tab=readme-ov-file#preview-features) when running `terraform` commands. + +## Example Usages +{{ tffile (printf "examples/%s/main.tf" .Name )}} + +{{ .SchemaMarkdown | trimspace }} + diff --git a/templates/resources/resource_policy.md.tmpl b/templates/resources/resource_policy.md.tmpl new file mode 100644 index 0000000000..0413c26820 --- /dev/null +++ b/templates/resources/resource_policy.md.tmpl @@ -0,0 +1,21 @@ +# {{.Type}}: {{.Name}} + +`{{.Name}}` provides a Resource Policy resource. The resource lets you create, edit and delete resource policies to prevent misconfigurations and reduce the need for corrective interventions in your organization. + +-> **NOTE**: Resource Policies are currently in Private Preview. To use this feature, you must take the following actions: +1. Enable the `Atlas Resource Policies` Preview Feature in your organization (contact [MongoDB Support](https://www.mongodb.com/services/support)). +2. Enable the [Preview Features](https://github.com/mongodb/terraform-provider-mongodbatlas?tab=readme-ov-file#preview-features) when running `terraform` commands. + + +## Example Usages + +{{ tffile (printf "examples/%s/main.tf" .Name )}} + +{{ .SchemaMarkdown | trimspace }} + +# Import +Resource Policy resource can be imported using the org ID and policy ID, in the format `{ORG_ID}-{POLICY_ID}`, e.g. 
+ +``` +terraform import mongodbatlas_resource_policy.cloud_region 65def6ce0f722a1507105aa5-66f1c018dba9c04e7dcfaf36 +``` diff --git a/tools/check-changelog-entry-file/main.go b/tools/check-changelog-entry-file/main.go index c11edccd29..a7926ac78f 100644 --- a/tools/check-changelog-entry-file/main.go +++ b/tools/check-changelog-entry-file/main.go @@ -12,7 +12,7 @@ import ( var ( skipLabelName = "skip-changelog-check" - skipTitles = []string{"chore", "test", "doc", "ci"} // Dependabot uses chore. + skipTitles = []string{"chore", "test", "doc", "ci", "refactor"} // Dependabot uses chore. allowedTypeValues = getValidTypes("scripts/changelog/allowed-types.txt") typesRequireResourcePrefix = []string{"breaking-change", "enhancement", "bug"} ) diff --git a/tools/scaffold/scaffold.go b/tools/scaffold/scaffold.go index 406b882ea8..441e2dc989 100644 --- a/tools/scaffold/scaffold.go +++ b/tools/scaffold/scaffold.go @@ -79,6 +79,10 @@ func filesToGenerate(params *ScaffoldParams) ([]FileGeneration, error) { TemplatePath: "tools/scaffold/template/generator_config.tmpl", OutputPath: fmt.Sprintf("%s/tfplugingen/generator_config.yml", folderPath), }, + { + TemplatePath: "tools/scaffold/template/main_test.tmpl", + OutputPath: fmt.Sprintf("%s/main_test.go", folderPath), + }, }, nil case DataSourceCmd: return []FileGeneration{ @@ -88,7 +92,7 @@ func filesToGenerate(params *ScaffoldParams) ([]FileGeneration, error) { }, { TemplatePath: "tools/scaffold/template/acc_test.tmpl", - OutputPath: fmt.Sprintf("%s/data_source_test.go", folderPath), + OutputPath: fmt.Sprintf("%s/resource_test.go", folderPath), }, { TemplatePath: "tools/scaffold/template/model.tmpl", @@ -102,6 +106,10 @@ func filesToGenerate(params *ScaffoldParams) ([]FileGeneration, error) { TemplatePath: "tools/scaffold/template/generator_config.tmpl", OutputPath: fmt.Sprintf("%s/tfplugingen/generator_config.yml", folderPath), }, + { + TemplatePath: "tools/scaffold/template/main_test.tmpl", + OutputPath: fmt.Sprintf("%s/main_test.go", folderPath), + }, }, nil case PluralDataSourceCmd: return []FileGeneration{ @@ -111,7 +119,7 @@ func filesToGenerate(params *ScaffoldParams) ([]FileGeneration, error) { }, { TemplatePath: "tools/scaffold/template/acc_test.tmpl", - OutputPath: fmt.Sprintf("%s/plural_data_source_test.go", folderPath), + OutputPath: fmt.Sprintf("%s/resource_test.go", folderPath), }, { TemplatePath: "tools/scaffold/template/model.tmpl", @@ -125,6 +133,10 @@ func filesToGenerate(params *ScaffoldParams) ([]FileGeneration, error) { TemplatePath: "tools/scaffold/template/generator_config.tmpl", OutputPath: fmt.Sprintf("%s/tfplugingen/generator_config.yml", folderPath), }, + { + TemplatePath: "tools/scaffold/template/main_test.tmpl", + OutputPath: fmt.Sprintf("%s/main_test.go", folderPath), + }, }, nil default: return nil, errors.New("unknown generation type provided") diff --git a/tools/scaffold/template/datasource.tmpl b/tools/scaffold/template/datasource.tmpl index b0e3b9d0da..b191d3bb92 100644 --- a/tools/scaffold/template/datasource.tmpl +++ b/tools/scaffold/template/datasource.tmpl @@ -9,32 +9,32 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) -const {{.NameCamelCase}}Name = "{{.NameSnakeCase}}" // TODO: if resource exists this can be deleted +const resourceName = "{{.NameSnakeCase}}" // TODO: if resource exists this can be deleted -var _ datasource.DataSource = &{{.NameCamelCase}}DS{} -var _ datasource.DataSourceWithConfigure = &{{.NameCamelCase}}DS{} +var _ datasource.DataSource = &ds{} +var _ 
datasource.DataSourceWithConfigure = &ds{} func DataSource() datasource.DataSource { - return &{{.NameCamelCase}}DS{ + return &ds{ DSCommon: config.DSCommon{ - DataSourceName: {{.NameCamelCase}}Name, + DataSourceName: resourceName, }, } } -type {{.NameCamelCase}}DS struct { +type ds struct { config.DSCommon } -func (d *{{.NameCamelCase}}DS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { +func (d *ds) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { // TODO: Schema and model must be defined in data_source_schema.go. Details on scaffolding this file found in contributing/development-best-practices.md under "Scaffolding Schema and Model Definitions" resp.Schema = DataSourceSchema(ctx) } -func (d *{{.NameCamelCase}}DS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var {{.NameCamelCase}}Config TF{{.NamePascalCase}}Model - resp.Diagnostics.Append(req.Config.Get(ctx, &{{.NameCamelCase}}Config)...) +func (d *ds) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var tfModel TFModel + resp.Diagnostics.Append(req.Config.Get(ctx, &tfModel)...) if resp.Diagnostics.HasError() { return } @@ -48,7 +48,7 @@ func (d *{{.NameCamelCase}}DS) Read(ctx context.Context, req datasource.ReadRequ //} // TODO: process response into new terraform state - new{{.NamePascalCase}}Model, diags := NewTF{{.NamePascalCase}}(ctx, apiResp) + new{{.NamePascalCase}}Model, diags := NewTFModel(ctx, apiResp) if diags.HasError() { resp.Diagnostics.Append(diags...) return diff --git a/tools/scaffold/template/main_test.tmpl b/tools/scaffold/template/main_test.tmpl new file mode 100644 index 0000000000..85c3a03135 --- /dev/null +++ b/tools/scaffold/template/main_test.tmpl @@ -0,0 +1,15 @@ +package {{.NameLowerNoSpaces}}_test + +import ( + "os" + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" +) + +func TestMain(m *testing.M) { + cleanup := acc.SetupSharedResources() + exitCode := m.Run() + cleanup() + os.Exit(exitCode) +} diff --git a/tools/scaffold/template/model.tmpl b/tools/scaffold/template/model.tmpl index 067365ef41..558d6e4165 100644 --- a/tools/scaffold/template/model.tmpl +++ b/tools/scaffold/template/model.tmpl @@ -8,18 +8,18 @@ import ( ) // TODO: `ctx` parameter and `diags` return value can be removed if tf schema has no complex data types (e.g., schema.ListAttribute, schema.SetAttribute) -func NewTF{{.NamePascalCase}}(ctx context.Context, apiResp *admin.{{.NamePascalCase}}) (*TF{{.NamePascalCase}}Model, diag.Diagnostics) { +func NewTFModel(ctx context.Context, apiResp *admin.{{.NamePascalCase}}) (*TFModel, diag.Diagnostics) { // complexAttr, diagnostics := types.ListValueFrom(ctx, InnerObjectType, newTFComplexAttrModel(apiResp.ComplexAttr)) // if diagnostics.HasError() { // return nil, diagnostics // } - return &TF{{.NamePascalCase}}Model{}, nil + return &TFModel{}, nil } {{if eq .GenerationType "resource"}} // TODO: If SDK defined different models for create and update separate functions will need to be defined. 
// TODO: `ctx` parameter and `diags` in return value can be removed if tf schema has no complex data types (e.g., schema.ListAttribute, schema.SetAttribute) -func New{{.NamePascalCase}}Req(ctx context.Context, plan *TF{{.NamePascalCase}}Model) (*admin.{{.NamePascalCase}}, diag.Diagnostics) { +func NewAtlasReq(ctx context.Context, plan *TFModel) (*admin.{{.NamePascalCase}}, diag.Diagnostics) { // var tfList []complexArgumentData // resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) // if resp.Diagnostics.HasError() { @@ -30,7 +30,7 @@ func New{{.NamePascalCase}}Req(ctx context.Context, plan *TF{{.NamePascalCase}}M {{end}} {{if eq .GenerationType "plural-data-source"}} -func NewTF{{.NamePascalCase}}s(ctx context.Context, paginatedResult *admin.Paginated{{.NamePascalCase}}) (*TF{{.NamePascalCase}}sDSModel, diag.Diagnostics) { +func NewTFModelPluralDS(ctx context.Context, paginatedResult *admin.Paginated{{.NamePascalCase}}) (*TF{{.NamePascalCase}}sDSModel, diag.Diagnostics) { return &TF{{.NamePascalCase}}sDSModel{}, nil } -{{end}} \ No newline at end of file +{{end}} diff --git a/tools/scaffold/template/model_test.tmpl b/tools/scaffold/template/model_test.tmpl index c028a26418..aa4551a559 100644 --- a/tools/scaffold/template/model_test.tmpl +++ b/tools/scaffold/template/model_test.tmpl @@ -11,7 +11,7 @@ import ( type sdkToTFModelTestCase struct { SDKResp *admin.{{.NamePascalCase}} - expectedTFModel *{{.NameLowerNoSpaces}}.TF{{.NamePascalCase}}Model + expectedTFModel *{{.NameLowerNoSpaces}}.TFModel } func Test{{.NamePascalCase}}SDKToTFModel(t *testing.T) { @@ -19,14 +19,14 @@ func Test{{.NamePascalCase}}SDKToTFModel(t *testing.T) { "Complete SDK response": { SDKResp: &admin.{{.NamePascalCase}}{ }, - expectedTFModel: &{{.NameLowerNoSpaces}}.TF{{.NamePascalCase}}Model{ + expectedTFModel: &{{.NameLowerNoSpaces}}.TFModel{ }, }, } for testName, tc := range testCases { t.Run(testName, func(t *testing.T) { - resultModel, diags := {{.NameLowerNoSpaces}}.NewTF{{.NamePascalCase}}(context.Background(), tc.SDKResp) + resultModel, diags := {{.NameLowerNoSpaces}}.NewTFModel(context.Background(), tc.SDKResp) if diags.HasError() { t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary()) } @@ -37,14 +37,14 @@ func Test{{.NamePascalCase}}SDKToTFModel(t *testing.T) { {{if eq .GenerationType "resource"}} type tfToSDKModelTestCase struct { - tfModel *{{.NameLowerNoSpaces}}.TF{{.NamePascalCase}}Model + tfModel *{{.NameLowerNoSpaces}}.TFModel expectedSDKReq *admin.{{.NamePascalCase}} } func Test{{.NamePascalCase}}TFModelToSDK(t *testing.T) { testCases := map[string]tfToSDKModelTestCase{ "Complete TF state": { - tfModel: &{{.NameLowerNoSpaces}}.TF{{.NamePascalCase}}Model{ + tfModel: &{{.NameLowerNoSpaces}}.TFModel{ }, expectedSDKReq: &admin.{{.NamePascalCase}}{ }, @@ -53,7 +53,7 @@ func Test{{.NamePascalCase}}TFModelToSDK(t *testing.T) { for testName, tc := range testCases { t.Run(testName, func(t *testing.T) { - apiReqResult, diags := {{.NameLowerNoSpaces}}.New{{.NamePascalCase}}Req(context.Background(), tc.tfModel) + apiReqResult, diags := {{.NameLowerNoSpaces}}.NewAtlasReq(context.Background(), tc.tfModel) if diags.HasError() { t.Errorf("unexpected errors found: %s", diags.Errors()[0].Summary()) } diff --git a/tools/scaffold/template/pluraldatasource.tmpl b/tools/scaffold/template/pluraldatasource.tmpl index f3ed10aada..98e7efbbba 100644 --- a/tools/scaffold/template/pluraldatasource.tmpl +++ b/tools/scaffold/template/pluraldatasource.tmpl @@ -11,29 +11,29 @@ import ( 
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/dsschema" ) -var _ datasource.DataSource = &{{.NameCamelCase}}sDS{} -var _ datasource.DataSourceWithConfigure = &{{.NameCamelCase}}sDS{} +var _ datasource.DataSource = &pluralDS{} +var _ datasource.DataSourceWithConfigure = &pluralDS{} func PluralDataSource() datasource.DataSource { - return &{{.NameCamelCase}}sDS{ + return &pluralDS{ DSCommon: config.DSCommon{ - DataSourceName: fmt.Sprintf("%ss", {{.NameCamelCase}}Name), + DataSourceName: fmt.Sprintf("%ss", resourceName), }, } } -type {{.NameCamelCase}}sDS struct { +type pluralDS struct { config.DSCommon } -func (d *{{.NameCamelCase}}sDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { +func (d *pluralDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { // TODO: Schema and model must be defined in plural_data_source_schema.go. Details on scaffolding this file found in contributing/development-best-practices.md under "Scaffolding Schema and Model Definitions" resp.Schema = PluralDataSourceSchema(ctx) } -func (d *{{.NameCamelCase}}sDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var {{.NameCamelCase}}sConfig TF{{.NamePascalCase}}sDSModel - resp.Diagnostics.Append(req.Config.Get(ctx, &{{.NameCamelCase}}sConfig)...) +func (d *pluralDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var tfModel TF{{.NamePascalCase}}sDSModel + resp.Diagnostics.Append(req.Config.Get(ctx, &tfModel)...) if resp.Diagnostics.HasError() { return } @@ -47,7 +47,7 @@ func (d *{{.NameCamelCase}}sDS) Read(ctx context.Context, req datasource.ReadReq //} // TODO: process response into new terraform state - new{{.NamePascalCase}}sModel, diags := NewTF{{.NamePascalCase}}s(ctx, apiResp) + new{{.NamePascalCase}}sModel, diags := NewTFModelPluralDS(ctx, apiResp) if diags.HasError() { resp.Diagnostics.Append(diags...) return diff --git a/tools/scaffold/template/resource.tmpl b/tools/scaffold/template/resource.tmpl index 5735a91ffc..75304c3be2 100644 --- a/tools/scaffold/template/resource.tmpl +++ b/tools/scaffold/template/resource.tmpl @@ -9,36 +9,36 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) -const {{.NameCamelCase}}Name = "{{.NameSnakeCase}}" +const resourceName = "{{.NameSnakeCase}}" -var _ resource.ResourceWithConfigure = &{{.NameCamelCase}}RS{} -var _ resource.ResourceWithImportState = &{{.NameCamelCase}}RS{} +var _ resource.ResourceWithConfigure = &rs{} +var _ resource.ResourceWithImportState = &rs{} func Resource() resource.Resource { - return &{{.NameCamelCase}}RS{ + return &rs{ RSCommon: config.RSCommon{ - ResourceName: {{.NameCamelCase}}Name, + ResourceName: resourceName, }, } } -type {{.NameCamelCase}}RS struct { +type rs struct { config.RSCommon } -func (r *{{.NameCamelCase}}RS) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { +func (r *rs) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { // TODO: Schema and model must be defined in resource_schema.go. 
Details on scaffolding this file found in contributing/development-best-practices.md under "Scaffolding Schema and Model Definitions" resp.Schema = ResourceSchema(ctx) } -func (r *{{.NameCamelCase}}RS) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - var {{.NameCamelCase}}Plan TF{{.NamePascalCase}}Model - resp.Diagnostics.Append(req.Plan.Get(ctx, &{{.NameCamelCase}}Plan)...) +func (r *rs) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var tfModel TFModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &tfModel)...) if resp.Diagnostics.HasError() { return } - {{.NameCamelCase}}Req, diags := New{{.NamePascalCase}}Req(ctx, &{{.NameCamelCase}}Plan) + {{.NameCamelCase}}Req, diags := NewAtlasReq(ctx, &tfModel) if diags.HasError() { resp.Diagnostics.Append(diags...) return @@ -53,7 +53,7 @@ func (r *{{.NameCamelCase}}RS) Create(ctx context.Context, req resource.CreateRe //} // TODO: process response into new terraform state - new{{.NamePascalCase}}Model, diags := NewTF{{.NamePascalCase}}(ctx, apiResp) + new{{.NamePascalCase}}Model, diags := NewTFModel(ctx, apiResp) if diags.HasError() { resp.Diagnostics.Append(diags...) return @@ -61,8 +61,8 @@ func (r *{{.NameCamelCase}}RS) Create(ctx context.Context, req resource.CreateRe resp.Diagnostics.Append(resp.State.Set(ctx, new{{.NamePascalCase}}Model)...) } -func (r *{{.NameCamelCase}}RS) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - var {{.NameCamelCase}}State TF{{.NamePascalCase}}Model +func (r *rs) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var {{.NameCamelCase}}State TFModel resp.Diagnostics.Append(req.State.Get(ctx, &{{.NameCamelCase}}State)...) if resp.Diagnostics.HasError() { return @@ -81,7 +81,7 @@ func (r *{{.NameCamelCase}}RS) Read(ctx context.Context, req resource.ReadReques //} // TODO: process response into new terraform state - new{{.NamePascalCase}}Model, diags := NewTF{{.NamePascalCase}}(ctx, apiResp) + new{{.NamePascalCase}}Model, diags := NewTFModel(ctx, apiResp) if diags.HasError() { resp.Diagnostics.Append(diags...) return @@ -89,14 +89,14 @@ func (r *{{.NameCamelCase}}RS) Read(ctx context.Context, req resource.ReadReques resp.Diagnostics.Append(resp.State.Set(ctx, new{{.NamePascalCase}}Model)...) } -func (r *{{.NameCamelCase}}RS) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var {{.NameCamelCase}}Plan TF{{.NamePascalCase}}Model - resp.Diagnostics.Append(req.Plan.Get(ctx, &{{.NameCamelCase}}Plan)...) +func (r *rs) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var tfModel TFModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &tfModel)...) if resp.Diagnostics.HasError() { return } - {{.NameCamelCase}}Req, diags := New{{.NamePascalCase}}Req(ctx, &{{.NameCamelCase}}Plan) + {{.NameCamelCase}}Req, diags := NewAtlasReq(ctx, &tfModel) if diags.HasError() { resp.Diagnostics.Append(diags...) return @@ -111,7 +111,7 @@ func (r *{{.NameCamelCase}}RS) Update(ctx context.Context, req resource.UpdateRe // TODO: process response into new terraform state - new{{.NamePascalCase}}Model, diags := NewTF{{.NamePascalCase}}(ctx, apiResp) + new{{.NamePascalCase}}Model, diags := NewTFModel(ctx, apiResp) if diags.HasError() { resp.Diagnostics.Append(diags...) 
return @@ -119,8 +119,8 @@ func (r *{{.NameCamelCase}}RS) Update(ctx context.Context, req resource.UpdateRe resp.Diagnostics.Append(resp.State.Set(ctx, new{{.NamePascalCase}}Model)...) } -func (r *{{.NameCamelCase}}RS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - var {{.NameCamelCase}}State *TF{{.NamePascalCase}}Model +func (r *rs) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var {{.NameCamelCase}}State *TFModel resp.Diagnostics.Append(req.State.Get(ctx, &{{.NameCamelCase}}State)...) if resp.Diagnostics.HasError() { return @@ -135,7 +135,7 @@ func (r *{{.NameCamelCase}}RS) Delete(ctx context.Context, req resource.DeleteRe // } } -func (r *{{.NameCamelCase}}RS) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +func (r *rs) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { // TODO: parse req.ID string taking into account documented format. Example: // projectID, other, err := split{{.NamePascalCase}}ImportID(req.ID)
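Illustration only, not part of this changeset: the scaffold template above ends with a TODO referencing a `split...ImportID` helper. A sketch of what such a helper could look like for the `{ORG_ID}-{POLICY_ID}` import format documented for `mongodbatlas_resource_policy` follows; the function name and regular expression are assumptions, not existing provider code.

```go
package resourcepolicy

import (
	"fmt"
	"regexp"
)

// Two 24-character hexadecimal IDs separated by a dash, e.g.
// 65def6ce0f722a1507105aa5-66f1c018dba9c04e7dcfaf36.
var importIDPattern = regexp.MustCompile(`^([0-9a-fA-F]{24})-([0-9a-fA-F]{24})$`)

// splitResourcePolicyImportID parses an import ID in the {ORG_ID}-{POLICY_ID} format.
func splitResourcePolicyImportID(importID string) (orgID, policyID string, err error) {
	parts := importIDPattern.FindStringSubmatch(importID)
	if parts == nil {
		return "", "", fmt.Errorf("import ID %q must use the format {ORG_ID}-{POLICY_ID}", importID)
	}
	return parts[1], parts[2], nil
}
```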