Skip to content

Commit

Permalink
feat: implement search node data source with acceptance tests and docs
Browse files Browse the repository at this point in the history
  • Loading branch information
AgustinBettati committed Nov 15, 2023
1 parent 7a5d987 commit 6fc6e3e
Show file tree
Hide file tree
Showing 5 changed files with 195 additions and 40 deletions.
99 changes: 99 additions & 0 deletions mongodbatlas/fw_data_source_mongodbatlas_search_deployment.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
package mongodbatlas

import (
"context"

"github.com/hashicorp/terraform-plugin-framework/datasource"
"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
"github.com/hashicorp/terraform-plugin-framework/types"
)

// Compile-time assertions that SearchDeploymentDS implements the
// framework's data source interfaces (Configure is supplied by DSCommon).
var _ datasource.DataSource = &SearchDeploymentDS{}
var _ datasource.DataSourceWithConfigure = &SearchDeploymentDS{}

// NewSearchDeploymentDS returns the mongodbatlas_search_deployment data
// source, wired with the shared DSCommon configuration so it registers
// under the same name as the corresponding resource.
func NewSearchDeploymentDS() datasource.DataSource {
	ds := &SearchDeploymentDS{}
	ds.dataSourceName = searchDeploymentName
	return ds
}

// tfSearchDeploymentDSModel is the Terraform config/state model for the
// mongodbatlas_search_deployment data source. The tfsdk tags bind each
// field to the matching attribute declared in Schema. Unlike the resource
// model it carries no timeouts field.
type tfSearchDeploymentDSModel struct {
ID types.String `tfsdk:"id"`
ClusterName types.String `tfsdk:"cluster_name"`
ProjectID types.String `tfsdk:"project_id"`
Specs types.List `tfsdk:"specs"`
StateName types.String `tfsdk:"state_name"`
}

// SearchDeploymentDS implements the mongodbatlas_search_deployment data
// source. The embedded DSCommon carries the data source name and the
// configured Atlas client used in Read (d.client).
type SearchDeploymentDS struct {
DSCommon
}

// Schema declares the data source attributes: project_id and cluster_name
// are required inputs identifying the cluster; id, specs (instance size and
// node count per spec) and state_name are computed from the API response.
func (d *SearchDeploymentDS) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	// Nested attributes of each entry in the "specs" list.
	specAttributes := map[string]schema.Attribute{
		"instance_size": schema.StringAttribute{Computed: true},
		"node_count":    schema.Int64Attribute{Computed: true},
	}
	resp.Schema = schema.Schema{
		Attributes: map[string]schema.Attribute{
			"id":           schema.StringAttribute{Computed: true},
			"cluster_name": schema.StringAttribute{Required: true},
			"project_id":   schema.StringAttribute{Required: true},
			"specs": schema.ListNestedAttribute{
				Computed:     true,
				NestedObject: schema.NestedAttributeObject{Attributes: specAttributes},
			},
			"state_name": schema.StringAttribute{Computed: true},
		},
	}
}

func (d *SearchDeploymentDS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) {
var searchDeploymentConfig tfSearchDeploymentDSModel
resp.Diagnostics.Append(req.Config.Get(ctx, &searchDeploymentConfig)...)
if resp.Diagnostics.HasError() {
return
}

connV2 := d.client.AtlasV2
projectID := searchDeploymentConfig.ProjectID.ValueString()
clusterName := searchDeploymentConfig.ClusterName.ValueString()
deploymentResp, _, err := connV2.AtlasSearchApi.GetAtlasSearchDeployment(ctx, projectID, clusterName).Execute()
if err != nil {
resp.Diagnostics.AddError("error getting search node information", err.Error())
return
}

newSearchDeploymentModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, nil)
resp.Diagnostics.Append(diagnostics...)
if resp.Diagnostics.HasError() {
return
}
dsModel := convertToDSModel(newSearchDeploymentModel)
resp.Diagnostics.Append(resp.State.Set(ctx, dsModel)...)
}

// convertToDSModel projects the resource model produced by the shared
// newTFSearchDeployment helper onto the data source model, dropping the
// resource-only timeouts field.
func convertToDSModel(inputModel *tfSearchDeploymentRSModel) tfSearchDeploymentDSModel {
	var out tfSearchDeploymentDSModel
	out.ID = inputModel.ID
	out.ClusterName = inputModel.ClusterName
	out.ProjectID = inputModel.ProjectID
	out.Specs = inputModel.Specs
	out.StateName = inputModel.StateName
	return out
}
1 change: 1 addition & 0 deletions mongodbatlas/fw_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -413,6 +413,7 @@ func (p *MongodbtlasProvider) DataSources(context.Context) []func() datasource.D
NewProjectIPAccessListDS,
NewAtlasUserDS,
NewAtlasUsersDS,
NewSearchDeploymentDS,
}
}

Expand Down
68 changes: 36 additions & 32 deletions mongodbatlas/fw_resource_mongodbatlas_search_deployment.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,13 @@ var _ resource.ResourceWithImportState = &SearchDeploymentRS{}

const (
searchDeploymentDoesNotExistsError = "ATLAS_FTS_DEPLOYMENT_DOES_NOT_EXIST"
searchDeploymentName = "search_deployment"
)

func NewSearchDeploymentRS() resource.Resource {
return &SearchDeploymentRS{
RSCommon: RSCommon{
resourceName: "search_deployment",
resourceName: searchDeploymentName,
},
}
}
Expand Down Expand Up @@ -114,22 +115,22 @@ func (r *SearchDeploymentRS) Schema(ctx context.Context, req resource.SchemaRequ
const defaultSearchNodeTimeout time.Duration = 3 * time.Hour

func (r *SearchDeploymentRS) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
var searchNodePlan tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &searchNodePlan)...)
var searchDeploymentPlan tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &searchDeploymentPlan)...)
if resp.Diagnostics.HasError() {
return
}

connV2 := r.client.AtlasV2
projectID := searchNodePlan.ProjectID.ValueString()
clusterName := searchNodePlan.ClusterName.ValueString()
searchDeploymentReq := newSearchDeploymentReq(ctx, &searchNodePlan)
projectID := searchDeploymentPlan.ProjectID.ValueString()
clusterName := searchDeploymentPlan.ClusterName.ValueString()
searchDeploymentReq := newSearchDeploymentReq(ctx, &searchDeploymentPlan)
if _, _, err := connV2.AtlasSearchApi.CreateAtlasSearchDeployment(ctx, projectID, clusterName, &searchDeploymentReq).Execute(); err != nil {
resp.Diagnostics.AddError("error during search deployment creation", err.Error())
return
}

createTimeout, diags := searchNodePlan.Timeouts.Create(ctx, defaultSearchNodeTimeout)
createTimeout, diags := searchDeploymentPlan.Timeouts.Create(ctx, defaultSearchNodeTimeout)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
Expand All @@ -139,7 +140,7 @@ func (r *SearchDeploymentRS) Create(ctx context.Context, req resource.CreateRequ
resp.Diagnostics.AddError("error during search deployment creation", err.Error())
return
}
newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, searchNodePlan.Timeouts)
newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, &searchDeploymentPlan.Timeouts)
resp.Diagnostics.Append(diagnostics...)
if resp.Diagnostics.HasError() {
return
Expand All @@ -148,22 +149,22 @@ func (r *SearchDeploymentRS) Create(ctx context.Context, req resource.CreateRequ
}

func (r *SearchDeploymentRS) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
var searchNodePlan tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.State.Get(ctx, &searchNodePlan)...)
var searchDeploymentPlan tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.State.Get(ctx, &searchDeploymentPlan)...)
if resp.Diagnostics.HasError() {
return
}

connV2 := r.client.AtlasV2
projectID := searchNodePlan.ProjectID.ValueString()
clusterName := searchNodePlan.ClusterName.ValueString()
projectID := searchDeploymentPlan.ProjectID.ValueString()
clusterName := searchDeploymentPlan.ClusterName.ValueString()
deploymentResp, _, err := connV2.AtlasSearchApi.GetAtlasSearchDeployment(ctx, projectID, clusterName).Execute()
if err != nil {
resp.Diagnostics.AddError("error getting search deployment information", err.Error())
return
}

newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, searchNodePlan.Timeouts)
newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, &searchDeploymentPlan.Timeouts)
resp.Diagnostics.Append(diagnostics...)
if resp.Diagnostics.HasError() {
return
Expand All @@ -172,22 +173,22 @@ func (r *SearchDeploymentRS) Read(ctx context.Context, req resource.ReadRequest,
}

func (r *SearchDeploymentRS) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
var searchNodePlan tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &searchNodePlan)...)
var searchDeploymentPlan tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.Plan.Get(ctx, &searchDeploymentPlan)...)
if resp.Diagnostics.HasError() {
return
}

connV2 := r.client.AtlasV2
projectID := searchNodePlan.ProjectID.ValueString()
clusterName := searchNodePlan.ClusterName.ValueString()
searchDeploymentReq := newSearchDeploymentReq(ctx, &searchNodePlan)
projectID := searchDeploymentPlan.ProjectID.ValueString()
clusterName := searchDeploymentPlan.ClusterName.ValueString()
searchDeploymentReq := newSearchDeploymentReq(ctx, &searchDeploymentPlan)
if _, _, err := connV2.AtlasSearchApi.UpdateAtlasSearchDeployment(ctx, projectID, clusterName, &searchDeploymentReq).Execute(); err != nil {
resp.Diagnostics.AddError("error during search deployment update", err.Error())
return
}

updateTimeout, diags := searchNodePlan.Timeouts.Update(ctx, defaultSearchNodeTimeout)
updateTimeout, diags := searchDeploymentPlan.Timeouts.Update(ctx, defaultSearchNodeTimeout)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
Expand All @@ -197,7 +198,7 @@ func (r *SearchDeploymentRS) Update(ctx context.Context, req resource.UpdateRequ
resp.Diagnostics.AddError("error during search deployment update", err.Error())
return
}
newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, searchNodePlan.Timeouts)
newSearchNodeModel, diagnostics := newTFSearchDeployment(ctx, clusterName, deploymentResp, &searchDeploymentPlan.Timeouts)
resp.Diagnostics.Append(diagnostics...)
if resp.Diagnostics.HasError() {
return
Expand All @@ -206,21 +207,21 @@ func (r *SearchDeploymentRS) Update(ctx context.Context, req resource.UpdateRequ
}

func (r *SearchDeploymentRS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
var searchNodeState *tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.State.Get(ctx, &searchNodeState)...)
var searchDeploymentState *tfSearchDeploymentRSModel
resp.Diagnostics.Append(req.State.Get(ctx, &searchDeploymentState)...)
if resp.Diagnostics.HasError() {
return
}

connV2 := r.client.AtlasV2
projectID := searchNodeState.ProjectID.ValueString()
clusterName := searchNodeState.ClusterName.ValueString()
projectID := searchDeploymentState.ProjectID.ValueString()
clusterName := searchDeploymentState.ClusterName.ValueString()
if _, err := connV2.AtlasSearchApi.DeleteAtlasSearchDeployment(ctx, projectID, clusterName).Execute(); err != nil {
resp.Diagnostics.AddError("error during search deployment delete", err.Error())
return
}

deleteTimeout, diags := searchNodeState.Timeouts.Delete(ctx, defaultSearchNodeTimeout)
deleteTimeout, diags := searchDeploymentState.Timeouts.Delete(ctx, defaultSearchNodeTimeout)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
Expand Down Expand Up @@ -263,7 +264,7 @@ func waitSearchNodeStateTransition(ctx context.Context, projectID, clusterName s
stateConf := &retry.StateChangeConf{
Pending: []string{retrystrategy.RetryStrategyUpdatingState, retrystrategy.RetryStrategyPausedState},
Target: []string{retrystrategy.RetryStrategyIdleState},
Refresh: searchNodeRefreshFunc(ctx, projectID, clusterName, client),
Refresh: searchDeploymentRefreshFunc(ctx, projectID, clusterName, client),
Timeout: timeout,
MinTimeout: 1 * time.Minute,
Delay: 1 * time.Minute,
Expand All @@ -283,7 +284,7 @@ func waitSearchNodeDelete(ctx context.Context, projectID, clusterName string, cl
stateConf := &retry.StateChangeConf{
Pending: []string{retrystrategy.RetryStrategyIdleState, retrystrategy.RetryStrategyUpdatingState, retrystrategy.RetryStrategyPausedState},
Target: []string{retrystrategy.RetryStrategyDeletedState},
Refresh: searchNodeRefreshFunc(ctx, projectID, clusterName, client),
Refresh: searchDeploymentRefreshFunc(ctx, projectID, clusterName, client),
Timeout: timeout,
MinTimeout: 30 * time.Second,
Delay: 1 * time.Minute,
Expand All @@ -292,7 +293,7 @@ func waitSearchNodeDelete(ctx context.Context, projectID, clusterName string, cl
return err
}

func searchNodeRefreshFunc(ctx context.Context, projectID, clusterName string, client *admin.APIClient) retry.StateRefreshFunc {
func searchDeploymentRefreshFunc(ctx context.Context, projectID, clusterName string, client *admin.APIClient) retry.StateRefreshFunc {
return func() (any, string, error) {
deploymentResp, resp, err := client.AtlasSearchApi.GetAtlasSearchDeployment(ctx, projectID, clusterName).Execute()
if err != nil && deploymentResp == nil && resp == nil {
Expand All @@ -316,9 +317,9 @@ func searchNodeRefreshFunc(ctx context.Context, projectID, clusterName string, c
}
}

func newSearchDeploymentReq(ctx context.Context, searchNodePlan *tfSearchDeploymentRSModel) admin.ApiSearchDeploymentRequest {
func newSearchDeploymentReq(ctx context.Context, searchDeploymentPlan *tfSearchDeploymentRSModel) admin.ApiSearchDeploymentRequest {
var specs []tfSearchNodeSpecModel
searchNodePlan.Specs.ElementsAs(ctx, &specs, true)
searchDeploymentPlan.Specs.ElementsAs(ctx, &specs, true)

resultSpecs := make([]admin.ApiSearchDeploymentSpec, len(specs))
for i, spec := range specs {
Expand All @@ -333,13 +334,16 @@ func newSearchDeploymentReq(ctx context.Context, searchNodePlan *tfSearchDeploym
}
}

func newTFSearchDeployment(ctx context.Context, clusterName string, deployResp *admin.ApiSearchDeploymentResponse, timeout timeouts.Value) (*tfSearchDeploymentRSModel, diag.Diagnostics) {
func newTFSearchDeployment(ctx context.Context, clusterName string, deployResp *admin.ApiSearchDeploymentResponse, timeout *timeouts.Value) (*tfSearchDeploymentRSModel, diag.Diagnostics) {
result := tfSearchDeploymentRSModel{
ID: types.StringPointerValue(deployResp.Id),
ClusterName: types.StringValue(clusterName),
ProjectID: types.StringPointerValue(deployResp.GroupId),
StateName: types.StringPointerValue(deployResp.StateName),
Timeouts: timeout,
}

if timeout != nil {
result.Timeouts = *timeout
}

specsList, diagnostics := types.ListValueFrom(ctx, SpecObjectType, newTFSpecsModel(deployResp.Specs))
Expand Down
28 changes: 20 additions & 8 deletions mongodbatlas/fw_resource_mongodbatlas_search_deployment_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,16 +37,23 @@ func TestAccSearchNode_basic(t *testing.T) {
}

func newSearchNodeTestStep(resourceName, orgID, projectName, clusterName, instanceSize string, searchNodeCount int) resource.TestStep {
resourceChecks := searchNodeChecks(resourceName, clusterName, instanceSize, searchNodeCount)
dataSourceChecks := searchNodeChecks(fmt.Sprintf("data.%s", resourceName), clusterName, instanceSize, searchNodeCount)
return resource.TestStep{
Config: testAccMongoDBAtlasSearchDeploymentConfig(orgID, projectName, clusterName, instanceSize, searchNodeCount),
Check: resource.ComposeTestCheckFunc(
testAccCheckMongoDBAtlasSearchNodeExists(resourceName),
resource.TestCheckResourceAttrSet(resourceName, "project_id"),
resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName),
resource.TestCheckResourceAttr(resourceName, "specs.0.instance_size", instanceSize),
resource.TestCheckResourceAttr(resourceName, "specs.0.node_count", fmt.Sprintf("%d", searchNodeCount)),
resource.TestCheckResourceAttrSet(resourceName, "state_name"),
),
Check: resource.ComposeTestCheckFunc(append(resourceChecks, dataSourceChecks...)...),
}
}

func searchNodeChecks(targetName, clusterName, instanceSize string, searchNodeCount int) []resource.TestCheckFunc {
return []resource.TestCheckFunc{
testAccCheckMongoDBAtlasSearchNodeExists(targetName),
resource.TestCheckResourceAttrSet(targetName, "id"),
resource.TestCheckResourceAttrSet(targetName, "project_id"),
resource.TestCheckResourceAttr(targetName, "cluster_name", clusterName),
resource.TestCheckResourceAttr(targetName, "specs.0.instance_size", instanceSize),
resource.TestCheckResourceAttr(targetName, "specs.0.node_count", fmt.Sprintf("%d", searchNodeCount)),
resource.TestCheckResourceAttrSet(targetName, "state_name"),
}
}

Expand All @@ -65,6 +72,11 @@ func testAccMongoDBAtlasSearchDeploymentConfig(orgID, projectName, clusterName,
}
]
}
data "mongodbatlas_search_deployment" "test" {
project_id = mongodbatlas_search_deployment.test.project_id
cluster_name = mongodbatlas_search_deployment.test.cluster_name
}
`, clusterConfig, instanceSize, searchNodeCount)
}

Expand Down
39 changes: 39 additions & 0 deletions website/docs/d/search_deployment.html.markdown
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
---
layout: "mongodbatlas"
page_title: "MongoDB Atlas: search deployment"
sidebar_current: "docs-mongodbatlas-datasource-search-deployment"
description: |-
Describes a Search Deployment.
---

# Data Source: mongodbatlas_search_deployment

`mongodbatlas_search_deployment` describes a search node deployment.

## Example Usage

```terraform
data "mongodbatlas_search_deployment" "test" {
project_id = "<PROJECT_ID>"
cluster_name = "<CLUSTER_NAME>"
}
```

## Argument Reference

* `project_id` - (Required) The unique identifier for the [project](https://docs.atlas.mongodb.com/organizations-projects/#std-label-projects) that contains the specified cluster.
* `cluster_name` - (Required) The name of the cluster containing a search node deployment.

## Attributes Reference

* `specs` - List of settings that configure the search nodes for your cluster. See [specs](#specs).
* `state_name` - Human-readable label that indicates the current operating condition of this search node deployment.

### Specs

* `instance_size` - Hardware specification for the search node instance sizes. The [MongoDB Atlas API](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Atlas-Search) describes the valid values. More details can also be found in the [Search Node Documentation](https://www.mongodb.com/docs/atlas/cluster-config/multi-cloud-distribution/#search-tier).
* `node_count` - Number of search nodes in the cluster.


For more information see: [MongoDB Atlas API - Atlas Search](https://www.mongodb.com/docs/atlas/reference/api-resources-spec/v2/#tag/Atlas-Search) Documentation.

0 comments on commit 6fc6e3e

Please sign in to comment.