diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index db3a0e042c..7a6b5b3fcd 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -22,6 +22,7 @@ jobs: cluster_outage_simulation: ${{ steps.filter.outputs.cluster_outage_simulation }} advanced_cluster: ${{ steps.filter.outputs.advanced_cluster }} cluster: ${{ steps.filter.outputs.cluster }} + search_deployment: ${{ steps.filter.outputs.search_deployment }} generic: ${{ steps.filter.outputs.generic }} backup_online_archive: ${{ steps.filter.outputs.backup_online_archive }} backup_snapshots: ${{ steps.filter.outputs.backup_snapshots }} @@ -49,6 +50,8 @@ jobs: - 'mongodbatlas/**advanced_cluster**.go' cluster: - 'mongodbatlas/**mongodbatlas_cluster**.go' + search_deployment: + - 'mongodbatlas/**search_deployment**.go' generic: - 'mongodbatlas/data_source_mongodbatlas_backup_compliance_policy*.go' - 'mongodbatlas/resource_mongodbatlas_backup_compliance_policy*.go' @@ -185,6 +188,30 @@ jobs: TEST_REGEX: "^TestAccClusterRSCluster" run: make testacc + search_deployment: + needs: [ change-detection ] + if: ${{ needs.change-detection.outputs.search_deployment == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || github.event.label.name == 'run-testacc' || github.event.label.name == 'run-testacc-search-deployment' || inputs.parent-event-name == 'release' }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version-file: 'go.mod' + - name: Acceptance Tests + env: + MONGODB_ATLAS_PUBLIC_KEY: ${{ secrets.MONGODB_ATLAS_PUBLIC_KEY_CLOUD_DEV }} + MONGODB_ATLAS_PRIVATE_KEY: ${{ secrets.MONGODB_ATLAS_PRIVATE_KEY_CLOUD_DEV }} + MONGODB_ATLAS_ORG_ID: ${{ vars.MONGODB_ATLAS_ORG_ID_CLOUD_DEV }} + MONGODB_ATLAS_BASE_URL: ${{ vars.MONGODB_ATLAS_BASE_URL }} + ACCTEST_TIMEOUT: ${{ vars.ACCTEST_TIMEOUT }} + TF_LOG: ${{ 
vars.LOG_LEVEL }} + TF_ACC: 1 + PARALLEL_GO_TEST: 20 + TEST_REGEX: "^TestAccSearchDeployment" + run: make testacc + generic: # Acceptance tests that do not use any time-consuming resource (example: cluster) needs: [ change-detection ] if: ${{ needs.change-detection.outputs.generic == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || github.event.label.name == 'run-testacc' || github.event.label.name == 'run-testacc-generic' || inputs.parent-event-name == 'release' }} diff --git a/examples/atlas-search-deployment/README.md b/examples/atlas-search-deployment/README.md new file mode 100644 index 0000000000..dd06461ae2 --- /dev/null +++ b/examples/atlas-search-deployment/README.md @@ -0,0 +1,11 @@ +# MongoDB Atlas Provider - Atlas Cluster with dedicated Search Nodes Deployment + +This example shows how you can use Atlas Dedicated Search Nodes in Terraform. As part of it, a project and cluster resource are created as a prerequisite. + +Variables Required to be set: + +- `public_key`: Atlas public key +- `private_key`: Atlas private key +- `org_id`: Organization ID where the project and cluster will be created. + +For additional information you can visit the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-nodes-for-workload-isolation). 
\ No newline at end of file diff --git a/examples/atlas-search-deployment/main.tf b/examples/atlas-search-deployment/main.tf new file mode 100644 index 0000000000..42212eef18 --- /dev/null +++ b/examples/atlas-search-deployment/main.tf @@ -0,0 +1,33 @@ +resource "mongodbatlas_project" "example" { + name = "project-name" + org_id = var.org_id +} + +resource "mongodbatlas_advanced_cluster" "example" { + project_id = mongodbatlas_project.example.id + name = "ClusterExample" + cluster_type = "REPLICASET" + + replication_specs { + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } +} + +resource "mongodbatlas_search_deployment" "example" { + project_id = mongodbatlas_project.example.id + cluster_name = mongodbatlas_advanced_cluster.example.name + specs = [ + { + instance_size = "S20_HIGHCPU_NVME" + node_count = 2 + } + ] +} \ No newline at end of file diff --git a/examples/atlas-search-deployment/provider.tf b/examples/atlas-search-deployment/provider.tf new file mode 100644 index 0000000000..e5aeda8033 --- /dev/null +++ b/examples/atlas-search-deployment/provider.tf @@ -0,0 +1,4 @@ +provider "mongodbatlas" { + public_key = var.public_key + private_key = var.private_key +} \ No newline at end of file diff --git a/examples/atlas-search-deployment/variables.tf b/examples/atlas-search-deployment/variables.tf new file mode 100644 index 0000000000..503476e252 --- /dev/null +++ b/examples/atlas-search-deployment/variables.tf @@ -0,0 +1,12 @@ +variable "public_key" { + description = "Public API key to authenticate to Atlas" + type = string +} +variable "private_key" { + description = "Private API key to authenticate to Atlas" + type = string +} +variable "org_id" { + description = "Atlas Organization ID" + type = string +} \ No newline at end of file diff --git a/examples/atlas-search-deployment/versions.tf b/examples/atlas-search-deployment/versions.tf new file mode 
100644 index 0000000000..7cac4906f0 --- /dev/null +++ b/examples/atlas-search-deployment/versions.tf @@ -0,0 +1,9 @@ +terraform { + required_providers { + mongodbatlas = { + source = "mongodb/mongodbatlas" + version = "~> 1.13" + } + } + required_version = ">= 1.0" +} \ No newline at end of file diff --git a/mongodbatlas/framework/retry/retry_state.go b/mongodbatlas/framework/retry/retry_state.go index 776e76055c..df887ef77c 100644 --- a/mongodbatlas/framework/retry/retry_state.go +++ b/mongodbatlas/framework/retry/retry_state.go @@ -4,4 +4,8 @@ const ( RetryStrategyPendingState = "PENDING" RetryStrategyCompletedState = "COMPLETED" RetryStrategyErrorState = "ERROR" + RetryStrategyPausedState = "PAUSED" + RetryStrategyUpdatingState = "UPDATING" + RetryStrategyIdleState = "IDLE" + RetryStrategyDeletedState = "DELETED" ) diff --git a/mongodbatlas/fw_data_source_mongodbatlas_search_deployment.go b/mongodbatlas/fw_data_source_mongodbatlas_search_deployment.go new file mode 100644 index 0000000000..289bc1caf7 --- /dev/null +++ b/mongodbatlas/fw_data_source_mongodbatlas_search_deployment.go @@ -0,0 +1,99 @@ +package mongodbatlas + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +var _ datasource.DataSource = &SearchDeploymentDS{} +var _ datasource.DataSourceWithConfigure = &SearchDeploymentDS{} + +func NewSearchDeploymentDS() datasource.DataSource { + return &SearchDeploymentDS{ + DSCommon: DSCommon{ + dataSourceName: searchDeploymentName, + }, + } +} + +type tfSearchDeploymentDSModel struct { + ID types.String `tfsdk:"id"` + ClusterName types.String `tfsdk:"cluster_name"` + ProjectID types.String `tfsdk:"project_id"` + Specs types.List `tfsdk:"specs"` + StateName types.String `tfsdk:"state_name"` +} + +type SearchDeploymentDS struct { + DSCommon +} + +func (d *SearchDeploymentDS) Schema(ctx 
context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "cluster_name": schema.StringAttribute{ + Required: true, + }, + "project_id": schema.StringAttribute{ + Required: true, + }, + "specs": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "instance_size": schema.StringAttribute{ + Computed: true, + }, + "node_count": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + Computed: true, + }, + "state_name": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +func (d *SearchDeploymentDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var searchDeploymentConfig tfSearchDeploymentDSModel + resp.Diagnostics.Append(req.Config.Get(ctx, &searchDeploymentConfig)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := d.client.AtlasV2 + projectID := searchDeploymentConfig.ProjectID.ValueString() + clusterName := searchDeploymentConfig.ClusterName.ValueString() + deploymentResp, _, err := connV2.AtlasSearchApi.GetAtlasSearchDeployment(ctx, projectID, clusterName).Execute() + if err != nil { + resp.Diagnostics.AddError("error getting search node information", err.Error()) + return + } + + newSearchDeploymentModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, nil) + resp.Diagnostics.Append(diagnostics...) + if resp.Diagnostics.HasError() { + return + } + dsModel := convertToDSModel(newSearchDeploymentModel) + resp.Diagnostics.Append(resp.State.Set(ctx, dsModel)...) 
+} + +func convertToDSModel(inputModel *tfSearchDeploymentRSModel) tfSearchDeploymentDSModel { + return tfSearchDeploymentDSModel{ + ID: inputModel.ID, + ClusterName: inputModel.ClusterName, + ProjectID: inputModel.ProjectID, + Specs: inputModel.Specs, + StateName: inputModel.StateName, + } +} diff --git a/mongodbatlas/fw_provider.go b/mongodbatlas/fw_provider.go index cb0f981b25..aa9c86b55f 100644 --- a/mongodbatlas/fw_provider.go +++ b/mongodbatlas/fw_provider.go @@ -413,6 +413,7 @@ func (p *MongodbtlasProvider) DataSources(context.Context) []func() datasource.D NewProjectIPAccessListDS, NewAtlasUserDS, NewAtlasUsersDS, + NewSearchDeploymentDS, } } @@ -423,6 +424,7 @@ func (p *MongodbtlasProvider) Resources(context.Context) []func() resource.Resou NewDatabaseUserRS, NewAlertConfigurationRS, NewProjectIPAccessListRS, + NewSearchDeploymentRS, } } diff --git a/mongodbatlas/fw_resource_mongodbatlas_search_deployment.go b/mongodbatlas/fw_resource_mongodbatlas_search_deployment.go new file mode 100644 index 0000000000..800789f232 --- /dev/null +++ b/mongodbatlas/fw_resource_mongodbatlas_search_deployment.go @@ -0,0 +1,368 @@ +package mongodbatlas + +import ( + "context" + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + retrystrategy "github.com/mongodb/terraform-provider-mongodbatlas/mongodbatlas/framework/retry" + "github.com/mongodb/terraform-provider-mongodbatlas/mongodbatlas/util" + "go.mongodb.org/atlas-sdk/v20231115001/admin" +) + +var _ resource.ResourceWithConfigure = &SearchDeploymentRS{} +var _ resource.ResourceWithImportState = &SearchDeploymentRS{} + +const ( + searchDeploymentDoesNotExistsError = "ATLAS_FTS_DEPLOYMENT_DOES_NOT_EXIST" + searchDeploymentName = "search_deployment" +) + +func NewSearchDeploymentRS() resource.Resource { + return &SearchDeploymentRS{ + RSCommon: RSCommon{ + resourceName: searchDeploymentName, + }, + } +} + +type SearchDeploymentRS struct { + RSCommon +} + +type tfSearchDeploymentRSModel struct { + ID types.String `tfsdk:"id"` + ClusterName types.String `tfsdk:"cluster_name"` + ProjectID types.String `tfsdk:"project_id"` + Specs types.List `tfsdk:"specs"` + StateName types.String `tfsdk:"state_name"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +type tfSearchNodeSpecModel struct { + InstanceSize types.String `tfsdk:"instance_size"` + NodeCount types.Int64 `tfsdk:"node_count"` +} + +var SpecObjectType = types.ObjectType{AttrTypes: map[string]attr.Type{ + "instance_size": types.StringType, + "node_count": types.Int64Type, +}} + +func (r *SearchDeploymentRS) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + }, + "cluster_name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "project_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "specs": schema.ListNestedAttribute{ + Validators: 
[]validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.SizeAtLeast(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "instance_size": schema.StringAttribute{ + Required: true, + }, + "node_count": schema.Int64Attribute{ + Required: true, + }, + }, + }, + Required: true, + }, + "state_name": schema.StringAttribute{ + Computed: true, + }, + "timeouts": timeouts.Attributes(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +const defaultSearchNodeTimeout time.Duration = 3 * time.Hour + +func (r *SearchDeploymentRS) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var searchDeploymentPlan tfSearchDeploymentRSModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &searchDeploymentPlan)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.client.AtlasV2 + projectID := searchDeploymentPlan.ProjectID.ValueString() + clusterName := searchDeploymentPlan.ClusterName.ValueString() + searchDeploymentReq := newSearchDeploymentReq(ctx, &searchDeploymentPlan) + if _, _, err := connV2.AtlasSearchApi.CreateAtlasSearchDeployment(ctx, projectID, clusterName, &searchDeploymentReq).Execute(); err != nil { + resp.Diagnostics.AddError("error during search deployment creation", err.Error()) + return + } + + createTimeout, diags := searchDeploymentPlan.Timeouts.Create(ctx, defaultSearchNodeTimeout) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + deploymentResp, err := waitSearchNodeStateTransition(ctx, projectID, clusterName, connV2, createTimeout) + if err != nil { + resp.Diagnostics.AddError("error during search deployment creation", err.Error()) + return + } + newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, &searchDeploymentPlan.Timeouts) + resp.Diagnostics.Append(diagnostics...) 
+ if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newSearchNodeModel)...) +} + +func (r *SearchDeploymentRS) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var searchDeploymentPlan tfSearchDeploymentRSModel + resp.Diagnostics.Append(req.State.Get(ctx, &searchDeploymentPlan)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.client.AtlasV2 + projectID := searchDeploymentPlan.ProjectID.ValueString() + clusterName := searchDeploymentPlan.ClusterName.ValueString() + deploymentResp, _, err := connV2.AtlasSearchApi.GetAtlasSearchDeployment(ctx, projectID, clusterName).Execute() + if err != nil { + resp.Diagnostics.AddError("error getting search deployment information", err.Error()) + return + } + + newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, &searchDeploymentPlan.Timeouts) + resp.Diagnostics.Append(diagnostics...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newSearchNodeModel)...) +} + +func (r *SearchDeploymentRS) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var searchDeploymentPlan tfSearchDeploymentRSModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &searchDeploymentPlan)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.client.AtlasV2 + projectID := searchDeploymentPlan.ProjectID.ValueString() + clusterName := searchDeploymentPlan.ClusterName.ValueString() + searchDeploymentReq := newSearchDeploymentReq(ctx, &searchDeploymentPlan) + if _, _, err := connV2.AtlasSearchApi.UpdateAtlasSearchDeployment(ctx, projectID, clusterName, &searchDeploymentReq).Execute(); err != nil { + resp.Diagnostics.AddError("error during search deployment update", err.Error()) + return + } + + updateTimeout, diags := searchDeploymentPlan.Timeouts.Update(ctx, defaultSearchNodeTimeout) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + deploymentResp, err := waitSearchNodeStateTransition(ctx, projectID, clusterName, connV2, updateTimeout) + if err != nil { + resp.Diagnostics.AddError("error during search deployment update", err.Error()) + return + } + newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, &searchDeploymentPlan.Timeouts) + resp.Diagnostics.Append(diagnostics...) + if resp.Diagnostics.HasError() { + return + } + resp.Diagnostics.Append(resp.State.Set(ctx, newSearchNodeModel)...) +} + +func (r *SearchDeploymentRS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var searchDeploymentState *tfSearchDeploymentRSModel + resp.Diagnostics.Append(req.State.Get(ctx, &searchDeploymentState)...) + if resp.Diagnostics.HasError() { + return + } + + connV2 := r.client.AtlasV2 + projectID := searchDeploymentState.ProjectID.ValueString() + clusterName := searchDeploymentState.ClusterName.ValueString() + if _, err := connV2.AtlasSearchApi.DeleteAtlasSearchDeployment(ctx, projectID, clusterName).Execute(); err != nil { + resp.Diagnostics.AddError("error during search deployment delete", err.Error()) + return + } + + deleteTimeout, diags := searchDeploymentState.Timeouts.Delete(ctx, defaultSearchNodeTimeout) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + if err := waitSearchNodeDelete(ctx, projectID, clusterName, connV2, deleteTimeout); err != nil { + resp.Diagnostics.AddError("error during search deployment delete", err.Error()) + return + } +} + +func (r *SearchDeploymentRS) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + projectID, clusterName, err := splitSearchNodeImportID(req.ID) + if err != nil { + resp.Diagnostics.AddError("error splitting search deployment import ID", err.Error()) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), projectID)...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("cluster_name"), clusterName)...) + if resp.Diagnostics.HasError() { + return + } +} + +func splitSearchNodeImportID(id string) (projectID, clusterName string, err error) { + var re = regexp.MustCompile(`(?s)^([0-9a-fA-F]{24})-(.*)$`) + parts := re.FindStringSubmatch(id) + + if len(parts) != 3 { + err = errors.New("use the format {project_id}-{cluster_name}") + return + } + + projectID = parts[1] + clusterName = parts[2] + return +} + +func waitSearchNodeStateTransition(ctx context.Context, projectID, clusterName string, client *admin.APIClient, timeout time.Duration) (*admin.ApiSearchDeploymentResponse, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{retrystrategy.RetryStrategyUpdatingState, retrystrategy.RetryStrategyPausedState}, + Target: []string{retrystrategy.RetryStrategyIdleState}, + Refresh: searchDeploymentRefreshFunc(ctx, projectID, clusterName, client), + Timeout: timeout, + MinTimeout: 1 * time.Minute, + Delay: 1 * time.Minute, + } + + result, err := stateConf.WaitForStateContext(ctx) + if err != nil { + return nil, err + } + if deploymentResp, ok := result.(*admin.ApiSearchDeploymentResponse); ok && deploymentResp != nil { + return deploymentResp, nil + } + return nil, errors.New("did not obtain valid result when 
waiting for search deployment state transition") +} + +func waitSearchNodeDelete(ctx context.Context, projectID, clusterName string, client *admin.APIClient, timeout time.Duration) error { + stateConf := &retry.StateChangeConf{ + Pending: []string{retrystrategy.RetryStrategyIdleState, retrystrategy.RetryStrategyUpdatingState, retrystrategy.RetryStrategyPausedState}, + Target: []string{retrystrategy.RetryStrategyDeletedState}, + Refresh: searchDeploymentRefreshFunc(ctx, projectID, clusterName, client), + Timeout: timeout, + MinTimeout: 30 * time.Second, + Delay: 1 * time.Minute, + } + _, err := stateConf.WaitForStateContext(ctx) + return err +} + +func searchDeploymentRefreshFunc(ctx context.Context, projectID, clusterName string, client *admin.APIClient) retry.StateRefreshFunc { + return func() (any, string, error) { + deploymentResp, resp, err := client.AtlasSearchApi.GetAtlasSearchDeployment(ctx, projectID, clusterName).Execute() + if err != nil && deploymentResp == nil && resp == nil { + return nil, "", err + } + if err != nil { + if resp.StatusCode == 400 && strings.Contains(err.Error(), searchDeploymentDoesNotExistsError) { + return "", retrystrategy.RetryStrategyDeletedState, nil + } + if resp.StatusCode == 503 { + return "", retrystrategy.RetryStrategyPendingState, nil + } + return nil, "", err + } + + if util.IsStringPresent(deploymentResp.StateName) { + tflog.Debug(ctx, fmt.Sprintf("search deployment status: %s", *deploymentResp.StateName)) + return deploymentResp, *deploymentResp.StateName, nil + } + return deploymentResp, "", nil + } +} + +func newSearchDeploymentReq(ctx context.Context, searchDeploymentPlan *tfSearchDeploymentRSModel) admin.ApiSearchDeploymentRequest { + var specs []tfSearchNodeSpecModel + searchDeploymentPlan.Specs.ElementsAs(ctx, &specs, true) + + resultSpecs := make([]admin.ApiSearchDeploymentSpec, len(specs)) + for i, spec := range specs { + resultSpecs[i] = admin.ApiSearchDeploymentSpec{ + InstanceSize: 
spec.InstanceSize.ValueString(), + NodeCount: int(spec.NodeCount.ValueInt64()), + } + } + + return admin.ApiSearchDeploymentRequest{ + Specs: resultSpecs, + } +} + +func newTFSearchDeployment(ctx context.Context, clusterName string, deployResp *admin.ApiSearchDeploymentResponse, timeout *timeouts.Value) (*tfSearchDeploymentRSModel, diag.Diagnostics) { + result := tfSearchDeploymentRSModel{ + ID: types.StringPointerValue(deployResp.Id), + ClusterName: types.StringValue(clusterName), + ProjectID: types.StringPointerValue(deployResp.GroupId), + StateName: types.StringPointerValue(deployResp.StateName), + } + + if timeout != nil { + result.Timeouts = *timeout + } + + specsList, diagnostics := types.ListValueFrom(ctx, SpecObjectType, newTFSpecsModel(deployResp.Specs)) + if diagnostics.HasError() { + return nil, diagnostics + } + + result.Specs = specsList + return &result, nil +} + +func newTFSpecsModel(specs []admin.ApiSearchDeploymentSpec) []tfSearchNodeSpecModel { + result := make([]tfSearchNodeSpecModel, len(specs)) + for i, v := range specs { + result[i] = tfSearchNodeSpecModel{ + InstanceSize: types.StringValue(v.InstanceSize), + NodeCount: types.Int64Value(int64(v.NodeCount)), + } + } + + return result +} diff --git a/mongodbatlas/fw_resource_mongodbatlas_search_deployment_test.go b/mongodbatlas/fw_resource_mongodbatlas_search_deployment_test.go new file mode 100644 index 0000000000..ecb1bc3260 --- /dev/null +++ b/mongodbatlas/fw_resource_mongodbatlas_search_deployment_test.go @@ -0,0 +1,153 @@ +package mongodbatlas + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestAccSearchDeployment_basic(t *testing.T) { + var ( + resourceName = "mongodbatlas_search_deployment.test" + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + projectName = 
acctest.RandomWithPrefix("test-acc-search-dep") + clusterName = acctest.RandomWithPrefix("test-acc-search-dep") + ) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheckBasic(t) }, + ProtoV6ProviderFactories: testAccProviderV6Factories, + CheckDestroy: testAccCheckMongoDBAtlasSearchNodeDestroy, + Steps: []resource.TestStep{ + newSearchNodeTestStep(resourceName, orgID, projectName, clusterName, "S20_HIGHCPU_NVME", 3), + newSearchNodeTestStep(resourceName, orgID, projectName, clusterName, "S30_HIGHCPU_NVME", 4), + { + Config: testAccMongoDBAtlasSearchDeploymentConfig(orgID, projectName, clusterName, "S30_HIGHCPU_NVME", 4), + ResourceName: resourceName, + ImportStateIdFunc: testAccCheckSearchNodeImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func newSearchNodeTestStep(resourceName, orgID, projectName, clusterName, instanceSize string, searchNodeCount int) resource.TestStep { + resourceChecks := searchNodeChecks(resourceName, clusterName, instanceSize, searchNodeCount) + dataSourceChecks := searchNodeChecks(fmt.Sprintf("data.%s", resourceName), clusterName, instanceSize, searchNodeCount) + return resource.TestStep{ + Config: testAccMongoDBAtlasSearchDeploymentConfig(orgID, projectName, clusterName, instanceSize, searchNodeCount), + Check: resource.ComposeTestCheckFunc(append(resourceChecks, dataSourceChecks...)...), + } +} + +func searchNodeChecks(targetName, clusterName, instanceSize string, searchNodeCount int) []resource.TestCheckFunc { + return []resource.TestCheckFunc{ + testAccCheckMongoDBAtlasSearchNodeExists(targetName), + resource.TestCheckResourceAttrSet(targetName, "id"), + resource.TestCheckResourceAttrSet(targetName, "project_id"), + resource.TestCheckResourceAttr(targetName, "cluster_name", clusterName), + resource.TestCheckResourceAttr(targetName, "specs.0.instance_size", instanceSize), + resource.TestCheckResourceAttr(targetName, "specs.0.node_count", fmt.Sprintf("%d", 
searchNodeCount)), + resource.TestCheckResourceAttrSet(targetName, "state_name"), + } +} + +func testAccMongoDBAtlasSearchDeploymentConfig(orgID, projectName, clusterName, instanceSize string, searchNodeCount int) string { + clusterConfig := advancedClusterConfig(orgID, projectName, clusterName) + return fmt.Sprintf(` + %[1]s + + resource "mongodbatlas_search_deployment" "test" { + project_id = mongodbatlas_project.test.id + cluster_name = mongodbatlas_advanced_cluster.test.name + specs = [ + { + instance_size = %[2]q + node_count = %[3]d + } + ] + } + + data "mongodbatlas_search_deployment" "test" { + project_id = mongodbatlas_search_deployment.test.project_id + cluster_name = mongodbatlas_search_deployment.test.cluster_name + } + `, clusterConfig, instanceSize, searchNodeCount) +} + +func advancedClusterConfig(orgID, projectName, clusterName string) string { + return fmt.Sprintf(` + resource "mongodbatlas_project" "test" { + org_id = %[1]q + name = %[2]q + } + resource "mongodbatlas_advanced_cluster" "test" { + project_id = mongodbatlas_project.test.id + name = %[3]q + cluster_type = "REPLICASET" + retain_backups_enabled = "true" + + replication_specs { + region_configs { + electable_specs { + instance_size = "M10" + node_count = 3 + } + provider_name = "AWS" + priority = 7 + region_name = "US_EAST_1" + } + } + } + `, orgID, projectName, clusterName) +} + +func testAccCheckSearchNodeImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("not found: %s", resourceName) + } + return fmt.Sprintf("%s-%s", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["cluster_name"]), nil + } +} + +func testAccCheckMongoDBAtlasSearchNodeExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("not found: 
%s", resourceName) + } + + connV2 := testAccProviderSdkV2.Meta().(*MongoDBClient).AtlasV2 + _, _, err := connV2.AtlasSearchApi.GetAtlasSearchDeployment(context.Background(), rs.Primary.Attributes["project_id"], rs.Primary.Attributes["cluster_name"]).Execute() + if err != nil { + return fmt.Errorf("search deployment (%s:%s) does not exist", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["cluster_name"]) + } + return nil + } +} + +func testAccCheckMongoDBAtlasSearchNodeDestroy(state *terraform.State) error { + if projectDestroyedErr := testAccCheckMongoDBAtlasProjectDestroy(state); projectDestroyedErr != nil { + return projectDestroyedErr + } + if clusterDestroyedErr := testAccCheckMongoDBAtlasAdvancedClusterDestroy(state); clusterDestroyedErr != nil { + return clusterDestroyedErr + } + connV2 := testAccProviderSdkV2.Meta().(*MongoDBClient).AtlasV2 + for _, rs := range state.RootModule().Resources { + if rs.Type == "mongodbatlas_search_deployment" { + _, _, err := connV2.AtlasSearchApi.GetAtlasSearchDeployment(context.Background(), rs.Primary.Attributes["project_id"], rs.Primary.Attributes["cluster_name"]).Execute() + if err == nil { + return fmt.Errorf("search deployment (%s:%s) still exists", rs.Primary.Attributes["project_id"], rs.Primary.Attributes["cluster_name"]) + } + } + } + return nil +} diff --git a/website/docs/d/search_deployment.html.markdown b/website/docs/d/search_deployment.html.markdown new file mode 100644 index 0000000000..019baa510d --- /dev/null +++ b/website/docs/d/search_deployment.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "mongodbatlas" +page_title: "MongoDB Atlas: search deployment" +sidebar_current: "docs-mongodbatlas-datasource-search-deployment" +description: |- +Describes a Search Deployment. +--- + +# Data Source: mongodbatlas_search_deployment + +`mongodbatlas_search_deployment` describes a search node deployment. 
+ +## Example Usage + +```terraform +data "mongodbatlas_search_deployment" "test" { + project_id = "" + cluster_name = "" +} +``` + +## Argument Reference + +* `project_id` - (Required) The unique identifier for the [project](https://docs.atlas.mongodb.com/organizations-projects/#std-label-projects) that contains the specified cluster. +* `cluster_name` - (Required) The name of the cluster containing a search node deployment. + +## Attributes Reference + +* `specs` - List of settings that configure the search nodes for your cluster. See [specs](#specs). +* `state_name` - Human-readable label that indicates the current operating condition of this search node deployment. + +### Specs +* `instance_size` - Hardware specification for the search node instance sizes. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Atlas-Search/operation/createAtlasSearchDeployment) describes the valid values. More details can also be found in the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-tier). +* `node_count` - Number of search nodes in the cluster. + + +For more information see: [MongoDB Atlas API - Search Node](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Atlas-Search/operation/createAtlasSearchDeployment) Documentation. diff --git a/website/docs/r/search_deployment.html.markdown b/website/docs/r/search_deployment.html.markdown new file mode 100644 index 0000000000..966e7e3d72 --- /dev/null +++ b/website/docs/r/search_deployment.html.markdown @@ -0,0 +1,60 @@ +--- +layout: "mongodbatlas" +page_title: "MongoDB Atlas: search deployment" +sidebar_current: "docs-mongodbatlas-resource-search-deployment" +description: |- +Provides a Search Deployment resource. +--- + +# Resource: mongodbatlas_search_deployment + +`mongodbatlas_search_deployment` provides a Search Deployment resource. 
The resource lets you create, edit and delete dedicated search nodes in a cluster. + +-> **NOTE:** For details on supported cloud providers and existing limitations you can visit the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-nodes-for-workload-isolation). +-> **NOTE:** Only a single search deployment resource can be defined for each cluster. + + +## Example Usage + +```terraform +resource "mongodbatlas_search_deployment" "test" { + project_id = "PROJECT ID" + cluster_name = "NAME OF CLUSTER" + specs = [ + { + instance_size = "S20_HIGHCPU_NVME" + node_count = 2 + } + ] +} +``` + +## Argument Reference + +* `project_id` - (Required) Unique 24-hexadecimal digit string that identifies your project. +* `cluster_name` - (Required) Label that identifies the cluster to create search nodes for. +* `specs` - (Required) List of settings that configure the search nodes for your cluster. This list is currently limited to defining a single element. See [specs](#specs). +* `timeouts`- (Optional) The time to wait for search nodes to be created, updated, or deleted. The timeout value is defined by a signed sequence of decimal numbers with a time unit suffix such as: `1h45m`, `300s`, `10m`, .... The valid time units are: `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. The attribute must be defined with [nested attributes](https://developer.hashicorp.com/terraform/plugin/framework/resources/timeouts#attribute). The default timeout for create, update, and delete is `3h`. Learn more about timeouts [here](https://developer.hashicorp.com/terraform/plugin/framework/resources/timeouts). + +### Specs + +Specs list is defined as a ["list nested attribute"](https://developer.hashicorp.com/terraform/plugin/framework/handling-data/attributes/list-nested) containing a single element. + +* `instance_size` - (Required) Hardware specification for the search node instance sizes. 
The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Atlas-Search/operation/createAtlasSearchDeployment) describes the valid values. More details can also be found in the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-tier). +* `node_count` - (Required) Number of search nodes in the cluster. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `state_name` - Human-readable label that indicates the current operating condition of this search node deployment. + +## Import + +The search deployment resource can be imported using the project ID and cluster name, in the format `PROJECT_ID-CLUSTER_NAME`, e.g. + +``` +$ terraform import mongodbatlas_search_deployment.test 650972848269185c55f40ca1-Cluster0 +``` + +For more information see: [MongoDB Atlas API - Search Node](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/#tag/Atlas-Search/operation/createAtlasSearchDeployment) Documentation.