Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: Adds support for multiple API versions for adv_cluster TPF #2840

Merged
merged 29 commits into from
Dec 3, 2024
Merged
Show file tree
Hide file tree
Changes from 25 commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
b382621
feat: Support update.IsEmpty
EspenAlbert Nov 27, 2024
6ce37a7
initial start of `num_shards` support
EspenAlbert Nov 27, 2024
e9f59f2
feat: differentiate API Versions used
EspenAlbert Nov 27, 2024
9357288
chore: formatting
EspenAlbert Nov 27, 2024
022eb51
feat: initial usage of latest admin api
EspenAlbert Nov 28, 2024
3aeabf6
feat: implement support for num_shards and add legacy compatibility f…
EspenAlbert Nov 28, 2024
0000526
chore: use ordinary diffs instead of colors
EspenAlbert Nov 28, 2024
1760162
refactor: remove commented-out code
EspenAlbert Nov 28, 2024
ae41f6b
test: pass static orgID for unit testing
EspenAlbert Nov 28, 2024
ff569a6
chore: initial working legacy schema
EspenAlbert Nov 29, 2024
3a9bb91
minor cleanup before review
EspenAlbert Nov 29, 2024
64cba27
test: Add acceptance test SymmetricShardedOldSchemaDiskSizeGBAtElecta…
EspenAlbert Nov 29, 2024
07b6786
feat: enhance disk size handling and normalization in advanced cluste…
EspenAlbert Nov 29, 2024
38b893d
chore: PatchPayload, support IgnoreInState
EspenAlbert Nov 29, 2024
13d1b52
address some PR comments
EspenAlbert Nov 29, 2024
51dad09
chore: Avoid update options (handled in normalize instead)
EspenAlbert Nov 29, 2024
28aeaa1
test: refresh golden file
EspenAlbert Nov 29, 2024
a7e034d
feat: Initial update support using 20240530
EspenAlbert Nov 30, 2024
ffa0962
refactor: Always set root disk size
EspenAlbert Nov 30, 2024
a175bfb
chore: update to use correct test data
EspenAlbert Nov 30, 2024
dd3c856
refactor: always set rootDiskSize
EspenAlbert Nov 30, 2024
e98c982
refactor: update legacy model to include disk size and adjust replica…
EspenAlbert Nov 30, 2024
13b7826
feat: add tests and mock data for symmetric sharded old schema config…
EspenAlbert Nov 30, 2024
b7e228b
test: fix mock file and remove manual change
EspenAlbert Nov 30, 2024
cfc9b59
chore: fix broken sharded test
EspenAlbert Nov 30, 2024
7fc29d8
refactor: minor refactorings
EspenAlbert Dec 2, 2024
e4670e4
fix: Address PR comments
EspenAlbert Dec 2, 2024
4f65aac
test: Change log message to fatal error for missing steps in request …
EspenAlbert Dec 2, 2024
2b6c361
address PR comments 2
EspenAlbert Dec 2, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
52 changes: 45 additions & 7 deletions internal/common/update/patch_payload.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package update

import (
"encoding/json"
"reflect"
"regexp"
"slices"
"strings"
Expand All @@ -11,12 +12,28 @@ import (
)

// attrPatchOperations groups the jsondiff operations of a state/plan diff by
// attribute, so each attribute's patch can be inspected and applied
// independently.
type attrPatchOperations struct {
	// data maps an attribute name to the diff operations that touch it.
	data map[string][]jsondiff.Operation
	// ignoreInState lists attribute names whose state-only values must not be
	// re-added to the patch request (matched by path suffix, see
	// ignoreInStatePath).
	ignoreInState []string
}

func newAttrPatchOperations(patch jsondiff.Patch) *attrPatchOperations {
func (m *attrPatchOperations) ignoreInStatePath(path string) bool {
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Changes to this file are not necessary for this PR, but will most likely be needed later.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If this is not necessary now, I would remove it and add it when it's needed

for _, ignore := range m.ignoreInState {
suffix := "/" + ignore
if strings.HasSuffix(path, suffix) {
return true
}
}
return false
}

func newAttrPatchOperations(patch jsondiff.Patch, options []PatchOptions) *attrPatchOperations {
ignoreInState := []string{}
for _, option := range options {
ignoreInState = append(ignoreInState, option.IgnoreInState...)
}
self := &attrPatchOperations{
data: map[string][]jsondiff.Operation{},
data: map[string][]jsondiff.Operation{},
ignoreInState: ignoreInState,
}
for _, op := range patch {
if op.Path == "" {
Expand Down Expand Up @@ -79,10 +96,14 @@ func (m *attrPatchOperations) StatePatch(attr string) jsondiff.Patch {
lastValue = op.Value
}
if op.Type == jsondiff.OperationRemove && !indexRemoval(op.Path) {
path := op.Path
if m.ignoreInStatePath(path) {
continue
}
patch = append(patch, jsondiff.Operation{
Type: jsondiff.OperationAdd,
Value: lastValue,
Path: op.Path,
Path: path,
})
}
}
Expand Down Expand Up @@ -113,6 +134,11 @@ func convertJSONDiffToJSONPatch(patch jsondiff.Patch) (jsonpatch.Patch, error) {
return decodedPatch, nil
}

// PatchOptions configures PatchPayload / PatchPayloadDiffTypes.
// Current limitation: IgnoreInState entries are matched by attribute-name
// suffix, so a field that is set as part of a nested attribute in a map
// cannot be targeted individually.
type PatchOptions struct {
	// IgnoreInState lists attribute names whose removed state values should
	// NOT be added back into the patch request.
	IgnoreInState []string
}

// PatchPayload uses the state and plan to changes to find the patch request, including changes only when:
// - The plan has replaced, added, or removed list values from the state
// Note that we intentionally do NOT include removed state values:
Expand All @@ -129,15 +155,19 @@ func convertJSONDiffToJSONPatch(patch jsondiff.Patch) (jsonpatch.Patch, error) {
// 4. Adds nested "removed" values from the state to the request
// 5. Use `jsonpatch` to apply each attribute plan & state patch to an empty JSON object
// 6. Create a `patchReq` pointer with the final JSON object marshaled to `T` or return nil if there are no changes (`{}`)
func PatchPayload[T any](state, plan *T) (*T, error) {
func PatchPayload[T any](state, plan *T, options ...PatchOptions) (*T, error) {
return PatchPayloadDiffTypes(state, plan, options...)
}

func PatchPayloadDiffTypes[T any, U any](state *T, plan *U, options ...PatchOptions) (*U, error) {
EspenAlbert marked this conversation as resolved.
Show resolved Hide resolved
if plan == nil {
return nil, nil
}
statePlanPatch, err := jsondiff.Compare(state, plan, jsondiff.Invertible())
if err != nil {
return nil, err
}
attrOperations := newAttrPatchOperations(statePlanPatch)
attrOperations := newAttrPatchOperations(statePlanPatch, options)
reqJSON := []byte(`{}`)

addPatchToRequest := func(patchDiff jsondiff.Patch) error {
Expand All @@ -155,7 +185,7 @@ func PatchPayload[T any](state, plan *T) (*T, error) {
return nil
}

patchReq := new(T)
patchReq := new(U)
patchFromPlanDiff, err := jsondiff.Compare(patchReq, plan)
if err != nil {
return nil, err
Expand All @@ -177,3 +207,11 @@ func PatchPayload[T any](state, plan *T) (*T, error) {
}
return patchReq, json.Unmarshal(reqJSON, patchReq)
}

// IsEmpty reports whether last is nil or points to the zero value of T.
// The comparison uses reflect.DeepEqual against a freshly allocated zero T,
// so any non-zero field (including pointers to zero values) makes it false.
func IsEmpty[T any](last *T) bool {
	if last == nil {
		return true
	}
	empty := new(T)
	return reflect.DeepEqual(last, empty)
}
53 changes: 51 additions & 2 deletions internal/common/update/patch_payload_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ func TestPatchReplicationSpecs(t *testing.T) {
state *admin.ClusterDescription20240805
plan *admin.ClusterDescription20240805
patchExpected *admin.ClusterDescription20240805
options []update.PatchOptions
}{
"ComputedValues from the state are added to plan and unchanged attributes are not included": {
state: &state,
Expand Down Expand Up @@ -187,14 +188,33 @@ func TestPatchReplicationSpecs(t *testing.T) {
plan: &planNoChanges,
patchExpected: nil,
},
"Empty array should return no changes": {
state: &admin.ClusterDescription20240805{
Labels: &[]admin.ComponentLabel{},
},
plan: &admin.ClusterDescription20240805{
Labels: &[]admin.ComponentLabel{},
},
patchExpected: nil,
},
"diskSizeGb ignored in state": {
state: clusterDescriptionDiskSizeNodeCount(50.0, 3, conversion.Pointer(50.0), 0),
plan: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0),
patchExpected: clusterDescriptionDiskSizeNodeCount(55.0, 3, nil, 0),
options: []update.PatchOptions{
{
IgnoreInState: []string{"diskSizeGB"},
},
},
},
}
)
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
if name == "Removed list entry should be included" {
t.Log("This test case is expected to fail due to the current implementation")
EspenAlbert marked this conversation as resolved.
Show resolved Hide resolved
}
patchReq, err := update.PatchPayload(tc.state, tc.plan)
patchReq, err := update.PatchPayload(tc.state, tc.plan, tc.options...)
require.NoError(t, err)
assert.Equal(t, tc.patchExpected, patchReq)
})
Expand All @@ -210,6 +230,7 @@ func TestPatchAdvancedConfig(t *testing.T) {
state *admin.ClusterDescriptionProcessArgs20240805
plan *admin.ClusterDescriptionProcessArgs20240805
patchExpected *admin.ClusterDescriptionProcessArgs20240805
options []update.PatchOptions
}{
"JavascriptEnabled is set to false": {
state: &state,
Expand Down Expand Up @@ -248,9 +269,37 @@ func TestPatchAdvancedConfig(t *testing.T) {
)
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
patchReq, err := update.PatchPayload(tc.state, tc.plan)
patchReq, err := update.PatchPayload(tc.state, tc.plan, tc.options...)
require.NoError(t, err)
assert.Equal(t, tc.patchExpected, patchReq)
})
}
}

// TestIsEmpty checks the three interesting cases: a declared-but-unset value,
// an explicit empty literal, and a description with one field populated.
func TestIsEmpty(t *testing.T) {
	var unset admin.ClusterDescription20240805
	assert.True(t, update.IsEmpty(&unset))
	assert.True(t, update.IsEmpty(&admin.ClusterDescription20240805{}))
	named := admin.ClusterDescription20240805{Name: conversion.Pointer("my-cluster")}
	assert.False(t, update.IsEmpty(&named))
}

// clusterDescriptionDiskSizeNodeCount builds a cluster description with a
// single replication spec and region config, using the given disk sizes and
// node counts for the electable and read-only hardware specs. The read-only
// disk size is passed through as-is (may be nil).
func clusterDescriptionDiskSizeNodeCount(diskSizeGBElectable float64, nodeCountElectable int, diskSizeGBReadOnly *float64, nodeCountReadOnly int) *admin.ClusterDescription20240805 {
	electable := admin.HardwareSpec20240805{
		NodeCount:  &nodeCountElectable,
		DiskSizeGB: &diskSizeGBElectable,
	}
	readOnly := admin.DedicatedHardwareSpec20240805{
		NodeCount:  &nodeCountReadOnly,
		DiskSizeGB: diskSizeGBReadOnly,
	}
	regionConfig := admin.CloudRegionConfig20240805{
		ElectableSpecs: &electable,
		ReadOnlySpecs:  &readOnly,
	}
	spec := admin.ReplicationSpec20240805{
		RegionConfigs: &[]admin.CloudRegionConfig20240805{regionConfig},
	}
	return &admin.ClusterDescription20240805{
		ReplicationSpecs: &[]admin.ReplicationSpec20240805{spec},
	}
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,34 @@ package advancedclustertpf

import (
"context"
"fmt"
"slices"
"strings"

"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
"go.mongodb.org/atlas-sdk/v20240805005/admin"
"go.mongodb.org/atlas-sdk/v20241113001/admin"
)

func NewTFModel(ctx context.Context, input *admin.ClusterDescription20240805, timeout timeouts.Value, diags *diag.Diagnostics) *TFModel {
// Error messages reported when converting API replication specs back into the
// legacy (numShards-based) schema and required legacy lookup data is missing.
const (
	errorZoneNameNotSet          = "zoneName is required for legacy schema"
	errorNumShardsNotSet         = "numShards not set for zoneName %s"
	errorReplicationSpecIDNotSet = "replicationSpecID not set for zoneName %s"
)

// LegacySchemaInfo carries the extra per-zone data needed to render API
// replication specs using the legacy (num_shards-based) schema.
type LegacySchemaInfo struct {
	// ZoneNameNumShards maps a zone name to its legacy num_shards value.
	ZoneNameNumShards map[string]int64
	// ZoneNameReplicationSpecIDs maps a zone name to its legacy replication spec ID.
	ZoneNameReplicationSpecIDs map[string]string
	// RootDiskSize is the cluster-level disk size; not read in this file's
	// visible code — presumably consumed elsewhere; TODO confirm with callers.
	RootDiskSize *float64
}

func NewTFModel(ctx context.Context, input *admin.ClusterDescription20240805, timeout timeouts.Value, diags *diag.Diagnostics, legacyInfo *LegacySchemaInfo) *TFModel {
biConnector := NewBiConnectorConfigObjType(ctx, input.BiConnector, diags)
connectionStrings := NewConnectionStringsObjType(ctx, input.ConnectionStrings, diags)
labels := NewLabelsObjType(ctx, input.Labels, diags)
replicationSpecs := NewReplicationSpecsObjType(ctx, input.ReplicationSpecs, diags)
replicationSpecs := NewReplicationSpecsObjType(ctx, input.ReplicationSpecs, diags, legacyInfo)
tags := NewTagsObjType(ctx, input.Tags, diags)
if diags.HasError() {
return nil
Expand All @@ -28,6 +43,7 @@ func NewTFModel(ctx context.Context, input *admin.ClusterDescription20240805, ti
ConfigServerType: types.StringPointerValue(input.ConfigServerType),
ConnectionStrings: connectionStrings,
CreateDate: types.StringPointerValue(conversion.TimePtrToStringPtr(input.CreateDate)),
DiskSizeGB: types.Float64PointerValue(findRegionRootDiskSize(input.ReplicationSpecs)),
EncryptionAtRestProvider: types.StringPointerValue(input.EncryptionAtRestProvider),
GlobalClusterSelfManagedSharding: types.BoolPointerValue(input.GlobalClusterSelfManagedSharding),
ProjectID: types.StringPointerValue(input.GroupId),
Expand Down Expand Up @@ -96,26 +112,84 @@ func NewLabelsObjType(ctx context.Context, input *[]admin.ComponentLabel, diags
return setType
}

func NewReplicationSpecsObjType(ctx context.Context, input *[]admin.ReplicationSpec20240805, diags *diag.Diagnostics) types.List {
// NewReplicationSpecsObjType converts API replication specs into a TF list
// value. When legacyInfo is non-nil the legacy (num_shards-based) conversion
// is used; otherwise the plain new-schema conversion applies. A null list is
// returned for nil input or when conversion reports diagnostics errors.
func NewReplicationSpecsObjType(ctx context.Context, input *[]admin.ReplicationSpec20240805, diags *diag.Diagnostics, legacyInfo *LegacySchemaInfo) types.List {
	if input == nil {
		return types.ListNull(ReplicationSpecsObjType)
	}
	var specModels *[]TFReplicationSpecsModel
	if legacyInfo != nil {
		specModels = convertReplicationSpecsLegacy(ctx, input, diags, legacyInfo)
	} else {
		specModels = convertReplicationSpecs(ctx, input, diags)
	}
	if diags.HasError() {
		return types.ListNull(ReplicationSpecsObjType)
	}
	result, localDiags := types.ListValueFrom(ctx, ReplicationSpecsObjType, *specModels)
	diags.Append(localDiags...)
	return result
}

// convertReplicationSpecs maps API replication specs to TF models for the
// new (non-legacy) schema: one TF model per API spec, with num_shards fixed
// to 1 and external_id mirroring the API spec ID.
func convertReplicationSpecs(ctx context.Context, input *[]admin.ReplicationSpec20240805, diags *diag.Diagnostics) *[]TFReplicationSpecsModel {
	tfModels := make([]TFReplicationSpecsModel, len(*input))
	for i, item := range *input {
		regionConfigs := NewRegionConfigsObjType(ctx, item.RegionConfigs, diags)
		tfModels[i] = TFReplicationSpecsModel{
			Id:            types.StringPointerValue(item.Id),
			ExternalId:    types.StringPointerValue(item.Id),
			NumShards:     types.Int64Value(1),                         // TODO: Static
			ContainerId:   conversion.ToTFMapOfString(ctx, diags, nil), // TODO: Static
			RegionConfigs: regionConfigs,
			ZoneId:        types.StringPointerValue(item.ZoneId),
			ZoneName:      types.StringPointerValue(item.ZoneName),
		}
	}
	return &tfModels
}

func convertReplicationSpecsLegacy(ctx context.Context, input *[]admin.ReplicationSpec20240805, diags *diag.Diagnostics, legacyInfo *LegacySchemaInfo) *[]TFReplicationSpecsModel {
tfModels := []TFReplicationSpecsModel{}
tfModelsSkipIndexes := []int{}
for i, item := range *input {
if slices.Contains(tfModelsSkipIndexes, i) {
continue
}
regionConfigs := NewRegionConfigsObjType(ctx, item.RegionConfigs, diags)
zoneName := item.GetZoneName()
if zoneName == "" {
diags.AddError(errorZoneNameNotSet, errorZoneNameNotSet)
return &tfModels
}
numShards, ok := legacyInfo.ZoneNameNumShards[zoneName]
errMsg := []string{}
if !ok {
errMsg = append(errMsg, fmt.Sprintf(errorNumShardsNotSet, zoneName))
}
legacyID, ok := legacyInfo.ZoneNameReplicationSpecIDs[zoneName]
if !ok {
errMsg = append(errMsg, fmt.Sprintf(errorReplicationSpecIDNotSet, zoneName))
}
if len(errMsg) > 0 {
errMsgStr := strings.Join(errMsg, ", ")
diags.AddError(errMsgStr, errMsgStr)
EspenAlbert marked this conversation as resolved.
Show resolved Hide resolved
return &tfModels
}
if numShards > 1 {
for j := 1; j < int(numShards); j++ {
tfModelsSkipIndexes = append(tfModelsSkipIndexes, i+j)
}
}
tfModels = append(tfModels, TFReplicationSpecsModel{
ContainerId: conversion.ToTFMapOfString(ctx, diags, nil), // TODO: Static
ExternalId: types.StringPointerValue(item.Id),
Id: types.StringValue(legacyID),
RegionConfigs: regionConfigs,
NumShards: types.Int64Value(numShards),
ZoneId: types.StringPointerValue(item.ZoneId),
ZoneName: types.StringPointerValue(item.ZoneName),
})
}
return &tfModels
}

func NewTagsObjType(ctx context.Context, input *[]admin.ResourceTag, diags *diag.Diagnostics) types.Set {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion"
admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin"
"go.mongodb.org/atlas-sdk/v20240805005/admin"
"go.mongodb.org/atlas-sdk/v20241113001/admin"
)

func AddAdvancedConfig(ctx context.Context, tfModel *TFModel, input *admin.ClusterDescriptionProcessArgs20240805, inputLegacy *admin20240530.ClusterDescriptionProcessArgs, diags *diag.Diagnostics) {
Expand Down
Loading