diff --git a/.changelog/2387.txt b/.changelog/2387.txt new file mode 100644 index 0000000000..e56999ff5d --- /dev/null +++ b/.changelog/2387.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_cloud_backup_schedule: Updates `copy_settings` on changes (even when empty) +``` \ No newline at end of file diff --git a/.changelog/2394.txt b/.changelog/2394.txt new file mode 100644 index 0000000000..6afb5599ae --- /dev/null +++ b/.changelog/2394.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_search_index: Returns error if the `analyzers` attribute contains unknown fields +``` diff --git a/.changelog/2396.txt b/.changelog/2396.txt new file mode 100644 index 0000000000..5bb53f7fda --- /dev/null +++ b/.changelog/2396.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/mongodbatlas_advanced_cluster: Fixes `disk_iops` attribute for Azure cloud provider +``` diff --git a/CHANGELOG.md b/CHANGELOG.md index a84a5bbe2f..56116aa948 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ ## (Unreleased) +BUG FIXES: + +* resource/mongodbatlas_advanced_cluster: Fixes `disk_iops` attribute for Azure cloud provider ([#2396](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2396)) +* resource/mongodbatlas_cloud_backup_schedule: Updates `copy_settings` on changes (even when empty) ([#2387](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2387)) +* resource/mongodbatlas_search_index: Returns error if the `analyzers` attribute contains unknown fields ([#2394](https://github.com/mongodb/terraform-provider-mongodbatlas/pull/2394)) + ## 1.17.3 (June 27, 2024) ## 1.17.2 (June 20, 2024) diff --git a/go.mod b/go.mod index f43688becb..eb750c90f4 100644 --- a/go.mod +++ b/go.mod @@ -4,21 +4,21 @@ go 1.22 require ( github.com/andygrunwald/go-jira/v2 v2.0.0-20240116150243-50d59fe116d6 - github.com/aws/aws-sdk-go v1.54.13 + github.com/aws/aws-sdk-go v1.54.17 github.com/go-test/deep v1.1.1 github.com/hashicorp/go-changelog v0.0.0-20240318095659-4d68c58a6e7f github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/go-version v1.7.0 github.com/hashicorp/hcl/v2 v2.21.0 - github.com/hashicorp/terraform-plugin-framework v1.9.0 + github.com/hashicorp/terraform-plugin-framework v1.10.0 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 - github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 github.com/hashicorp/terraform-plugin-go v0.23.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-mux v0.16.0 github.com/hashicorp/terraform-plugin-sdk v1.17.2 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 - github.com/hashicorp/terraform-plugin-testing v1.8.0 + github.com/hashicorp/terraform-plugin-testing v1.9.0 github.com/mongodb-forks/digest v1.1.0 github.com/spf13/cast v1.6.0 github.com/stretchr/testify v1.9.0 @@ -76,7 +76,7 @@ require ( github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect + github.com/hashicorp/hc-install v0.7.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect @@ -121,15 +121,15 @@ require ( go.opentelemetry.io/otel v1.22.0 // indirect go.opentelemetry.io/otel/metric v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect - 
golang.org/x/crypto v0.23.0 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/api v0.162.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect diff --git a/go.sum b/go.sum index 0926edc4cc..2d40220fdb 100644 --- a/go.sum +++ b/go.sum @@ -243,8 +243,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.54.13 h1:zpCuiG+/mFdDY/klKJvmSioAZWk45F4rLGq0JWVAAzk= -github.com/aws/aws-sdk-go v1.54.13/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.17 h1:ZV/qwcCIhMHgsJ6iXXPVYI0s1MdLT+5LW28ClzCUPeI= +github.com/aws/aws-sdk-go v1.54.17/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= @@ -499,8 +499,8 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -518,12 +518,12 @@ github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf github.com/hashicorp/terraform-json v0.10.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= -github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod 
h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc= +github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= @@ -535,8 +535,8 @@ github.com/hashicorp/terraform-plugin-sdk v1.17.2/go.mod h1:wkvldbraEMkz23NxkkAs github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= github.com/hashicorp/terraform-plugin-test/v2 v2.2.1/go.mod h1:eZ9JL3O69Cb71Skn6OhHyj17sLmHRb+H6VrDcJjKrYU= -github.com/hashicorp/terraform-plugin-testing v1.8.0 h1:wdYIgwDk4iO933gC4S8KbKdnMQShu6BXuZQPScmHvpk= -github.com/hashicorp/terraform-plugin-testing v1.8.0/go.mod h1:o2kOgf18ADUaZGhtOl0YCkfIxg01MAiMATT2EtIHlZk= +github.com/hashicorp/terraform-plugin-testing v1.9.0 h1:xOsQRqqlHKXpFq6etTxih3ubdK3HVDtfE1IY7Rpd37o= +github.com/hashicorp/terraform-plugin-testing v1.9.0/go.mod h1:fhhVx/8+XNJZTD5o3b4stfZ6+q7z9+lIWigIYdT6/44= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= @@ -829,8 +829,8 @@ golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -874,8 +874,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -944,8 +944,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -990,8 +990,8 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1089,8 +1089,8 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1104,8 +1104,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1125,8 +1125,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1194,8 +1194,9 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/service/advancedcluster/model_advanced_cluster.go b/internal/service/advancedcluster/model_advanced_cluster.go index 0bd011c764..8f26c1312b 100644 --- a/internal/service/advancedcluster/model_advanced_cluster.go +++ 
b/internal/service/advancedcluster/model_advanced_cluster.go @@ -634,10 +634,12 @@ func flattenAdvancedReplicationSpecRegionConfigSpec(apiObject *admin.DedicatedHa if len(tfMapObjects) > 0 { tfMapObject := tfMapObjects[0].(map[string]any) - if providerName == "AWS" { + if providerName == constant.AWS || providerName == constant.AZURE { if cast.ToInt64(apiObject.GetDiskIOPS()) > 0 { tfMap["disk_iops"] = apiObject.GetDiskIOPS() } + } + if providerName == constant.AWS { if v, ok := tfMapObject["ebs_volume_type"]; ok && v.(string) != "" { tfMap["ebs_volume_type"] = apiObject.GetEbsVolumeType() } @@ -850,10 +852,12 @@ func expandRegionConfig(tfMap map[string]any) *admin.CloudRegionConfig { func expandRegionConfigSpec(tfList []any, providerName string) *admin.DedicatedHardwareSpec { tfMap, _ := tfList[0].(map[string]any) apiObject := new(admin.DedicatedHardwareSpec) - if providerName == "AWS" { + if providerName == constant.AWS || providerName == constant.AZURE { if v, ok := tfMap["disk_iops"]; ok && v.(int) > 0 { apiObject.DiskIOPS = conversion.Pointer(v.(int)) } + } + if providerName == constant.AWS { if v, ok := tfMap["ebs_volume_type"]; ok { apiObject.EbsVolumeType = conversion.StringPtr(v.(string)) } diff --git a/internal/service/advancedcluster/resource_advanced_cluster_test.go b/internal/service/advancedcluster/resource_advanced_cluster_test.go index 80823e1735..8f72ce42de 100644 --- a/internal/service/advancedcluster/resource_advanced_cluster_test.go +++ b/internal/service/advancedcluster/resource_advanced_cluster_test.go @@ -675,7 +675,9 @@ func checkSingleProvider(projectID, name string) resource.TestCheckFunc { "name": name}, resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"), resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), - resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0))) + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0))) } func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string { @@ -830,7 +832,13 @@ func checkMultiCloudSharded(name string) resource.TestCheckFunc { return checkAggr( []string{"project_id", "replication_specs.#", "replication_specs.0.region_configs.#"}, map[string]string{ - "name": name}) + "name": name}, + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.1.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.analytics_specs.0.disk_iops", acc.IntGreatThan(0)), + resource.TestCheckResourceAttrWith(dataSourceName, 
"replication_specs.0.region_configs.1.electable_specs.0.disk_iops", acc.IntGreatThan(0))) } func configSingleProviderPaused(projectID, clusterName string, paused bool, instanceSize string) string { diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go index 19a1e70d05..202a3cfb2d 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule.go @@ -492,9 +492,9 @@ func cloudBackupScheduleCreateOrUpdate(ctx context.Context, connV2 *admin.APICli } req := &admin.DiskBackupSnapshotSchedule{} - - if v, ok := d.GetOk("copy_settings"); ok && len(v.([]any)) > 0 { - req.CopySettings = expandCopySettings(v.([]any)) + copySettings := d.Get("copy_settings") + if copySettings != nil && (conversion.HasElementsSliceOrMap(copySettings) || d.HasChange("copy_settings")) { + req.CopySettings = expandCopySettings(copySettings.([]any)) } var policiesItem []admin.DiskBackupApiPolicyItem @@ -642,11 +642,7 @@ func expandCopySetting(tfMap map[string]any) *admin.DiskBackupCopySetting { } func expandCopySettings(tfList []any) *[]admin.DiskBackupCopySetting { - if len(tfList) == 0 { - return nil - } - - var copySettings []admin.DiskBackupCopySetting + copySettings := make([]admin.DiskBackupCopySetting, 0) for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]any) diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 3d0bb0bf60..c9dff0becc 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -254,7 +254,45 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { var ( projectID = acc.ProjectIDExecution(t) clusterName = acc.RandomClusterName() + checkMap = map[string]string{ + "cluster_name": clusterName, + "reference_hour_of_day": "3", + "reference_minute_of_hour": "45", + "restore_window_days": "1", + "policy_item_hourly.#": "1", + "policy_item_daily.#": "1", + "policy_item_weekly.#": "1", + "policy_item_monthly.#": "1", + "policy_item_yearly.#": "1", + "policy_item_hourly.0.frequency_interval": "1", + "policy_item_hourly.0.retention_unit": "days", + "policy_item_hourly.0.retention_value": "1", + "policy_item_daily.0.frequency_interval": "1", + "policy_item_daily.0.retention_unit": "days", + "policy_item_daily.0.retention_value": "2", + "policy_item_weekly.0.frequency_interval": "4", + "policy_item_weekly.0.retention_unit": "weeks", + "policy_item_weekly.0.retention_value": "3", + "policy_item_monthly.0.frequency_interval": "5", + "policy_item_monthly.0.retention_unit": "months", + "policy_item_monthly.0.retention_value": "4", + "policy_item_yearly.0.frequency_interval": "1", + "policy_item_yearly.0.retention_unit": "years", + "policy_item_yearly.0.retention_value": "1", + } + copySettingsChecks = map[string]string{ + "copy_settings.#": "1", + "copy_settings.0.cloud_provider": "AWS", + "copy_settings.0.region_name": "US_EAST_1", + "copy_settings.0.should_copy_oplogs": "true", + } + emptyCopySettingsChecks = map[string]string{ + "copy_settings.#": "0", + } ) + checksDefault := acc.AddAttrChecks(resourceName, []resource.TestCheckFunc{checkExists(resourceName)}, checkMap) + checksCreate := acc.AddAttrChecks(resourceName, checksDefault, copySettingsChecks) + 
checksUpdate := acc.AddAttrChecks(resourceName, checksDefault, emptyCopySettingsChecks) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, @@ -262,41 +300,20 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(projectID, clusterName, &admin.DiskBackupSnapshotSchedule{ + Config: configCopySettings(projectID, clusterName, false, &admin.DiskBackupSnapshotSchedule{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), }), - Check: resource.ComposeAggregateTestCheckFunc( - checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterName), - resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), - resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), - resource.TestCheckResourceAttr(resourceName, "restore_window_days", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.#", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), - resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.0.frequency_interval", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.0.retention_unit", "days"), - resource.TestCheckResourceAttr(resourceName, "policy_item_daily.0.retention_value", "2"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.0.frequency_interval", "4"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.0.retention_unit", "weeks"), - resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.0.retention_value", "3"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.0.frequency_interval", "5"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.0.retention_unit", "months"), - resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.0.retention_value", "4"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.frequency_interval", "1"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_unit", "years"), - resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_value", "1"), - resource.TestCheckResourceAttr(resourceName, "copy_settings.0.cloud_provider", "AWS"), - resource.TestCheckResourceAttr(resourceName, "copy_settings.0.region_name", "US_EAST_1"), - resource.TestCheckResourceAttr(resourceName, "copy_settings.0.should_copy_oplogs", "true"), - ), + Check: resource.ComposeAggregateTestCheckFunc(checksCreate...), + }, + { + Config: configCopySettings(projectID, clusterName, true, &admin.DiskBackupSnapshotSchedule{ + ReferenceHourOfDay: conversion.Pointer(3), + ReferenceMinuteOfHour: conversion.Pointer(45), + RestoreWindowDays: conversion.Pointer(1), + }), + Check: resource.ComposeAggregateTestCheckFunc(checksUpdate...), }, }, }) @@ -357,7 +374,8 @@ func 
TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true, ProviderName: constant.AZURE}) + spec = acc.ReplicationSpecRequest{ProviderName: constant.AZURE} + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{spec}}) ) resource.ParallelTest(t, resource.TestCase{ @@ -507,7 +525,23 @@ func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) s `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(projectID, clusterName string, p *admin.DiskBackupSnapshotSchedule) string { +func configCopySettings(projectID, clusterName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { + var copySettings string + if !emptyCopySettings { + copySettings = ` + copy_settings { + cloud_provider = "AWS" + frequencies = ["HOURLY", + "DAILY", + "WEEKLY", + "MONTHLY", + "YEARLY", + "ON_DEMAND"] + region_name = "US_EAST_1" + replication_spec_id = mongodbatlas_cluster.my_cluster.replication_specs.*.id[0] + should_copy_oplogs = true + }` + } return fmt.Sprintf(` resource "mongodbatlas_cluster" "my_cluster" { project_id = %[1]q @@ -564,20 +598,9 @@ func configCopySettings(projectID, clusterName string, p *admin.DiskBackupSnapsh retention_unit = "years" retention_value = 1 } - copy_settings { - cloud_provider = "AWS" - frequencies = ["HOURLY", - "DAILY", - "WEEKLY", - "MONTHLY", - "YEARLY", - "ON_DEMAND"] - region_name = "US_EAST_1" - replication_spec_id = mongodbatlas_cluster.my_cluster.replication_specs.*.id[0] - should_copy_oplogs = true - } + %s } - `, projectID, clusterName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, projectID, clusterName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings) } func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index 342a354f21..68cdc51f5e 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -80,7 +80,11 @@ func TestAccClusterRSGlobalCluster_withAWSAndBackup(t *testing.T) { func TestAccClusterRSGlobalCluster_database(t *testing.T) { var ( - clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, ExtraConfig: zonesStr}) + specUS = acc.ReplicationSpecRequest{ZoneName: "US", Region: "US_EAST_1"} + specEU = acc.ReplicationSpecRequest{ZoneName: "EU", Region: "EU_WEST_1"} + specDE = acc.ReplicationSpecRequest{ZoneName: "DE", Region: "EU_NORTH_1"} + specJP = acc.ReplicationSpecRequest{ZoneName: "JP", Region: "AP_NORTHEAST_1"} + clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, ReplicationSpecs: []acc.ReplicationSpecRequest{specUS, specEU, specDE, specJP}}) ) resource.Test(t, resource.TestCase{ @@ -268,47 +272,4 @@ const ( zone = "JP" } ` - - zonesStr = ` - replication_specs { - zone_name = "US" - num_shards = 1 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - replication_specs { - zone_name = "EU" - num_shards = 1 - 
regions_config { - region_name = "EU_WEST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - replication_specs { - zone_name = "DE" - num_shards = 1 - regions_config { - region_name = "EU_NORTH_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - replication_specs { - zone_name = "JP" - num_shards = 1 - regions_config { - region_name = "AP_NORTHEAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - ` ) diff --git a/internal/service/searchdeployment/state_transition_search_deployment.go b/internal/service/searchdeployment/state_transition_search_deployment.go index ff0ea37ab1..3ba981c451 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment.go +++ b/internal/service/searchdeployment/state_transition_search_deployment.go @@ -13,7 +13,7 @@ import ( "go.mongodb.org/atlas-sdk/v20240530002/admin" ) -const SearchDeploymentDoesNotExistsError = "ATLAS_FTS_DEPLOYMENT_DOES_NOT_EXIST" +const SearchDeploymentDoesNotExistsError = "ATLAS_SEARCH_DEPLOYMENT_DOES_NOT_EXIST" func WaitSearchNodeStateTransition(ctx context.Context, projectID, clusterName string, client admin.AtlasSearchApi, timeConfig retrystrategy.TimeConfig) (*admin.ApiSearchDeploymentResponse, error) { @@ -56,7 +56,7 @@ func searchDeploymentRefreshFunc(ctx context.Context, projectID, clusterName str return nil, "", err } if err != nil { - if resp.StatusCode == 400 && strings.Contains(err.Error(), SearchDeploymentDoesNotExistsError) { + if resp.StatusCode == 404 && strings.Contains(err.Error(), SearchDeploymentDoesNotExistsError) { return "", retrystrategy.RetryStrategyDeletedState, nil } if resp.StatusCode == 503 { diff --git a/internal/service/searchdeployment/state_transition_search_deployment_test.go b/internal/service/searchdeployment/state_transition_search_deployment_test.go index ea9b197a50..21511e0d95 100644 --- a/internal/service/searchdeployment/state_transition_search_deployment_test.go +++ b/internal/service/searchdeployment/state_transition_search_deployment_test.go @@ -20,7 +20,7 @@ var ( updating = "UPDATING" idle = "IDLE" unknown = "" - sc400 = conversion.IntPtr(400) + sc404 = conversion.IntPtr(404) sc500 = conversion.IntPtr(500) sc503 = conversion.IntPtr(503) ) @@ -94,7 +94,7 @@ func TestSearchDeploymentStateTransitionForDelete(t *testing.T) { name: "Regular transition to DELETED", mockResponses: []response{ {state: &updating}, - {statusCode: sc400, err: errors.New(searchdeployment.SearchDeploymentDoesNotExistsError)}, + {statusCode: sc404, err: errors.New(searchdeployment.SearchDeploymentDoesNotExistsError)}, }, expectedError: false, }, diff --git a/internal/service/searchindex/resource_search_index.go b/internal/service/searchindex/resource_search_index.go index 0139101588..c338a66316 100644 --- a/internal/service/searchindex/resource_search_index.go +++ b/internal/service/searchindex/resource_search_index.go @@ -1,6 +1,7 @@ package searchindex import ( + "bytes" "context" "errors" "fmt" diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go index 5d3342ca20..d296c679d6 100644 --- a/internal/service/searchindex/resource_search_index_test.go +++ b/internal/service/searchindex/resource_search_index_test.go @@ -3,6 +3,7 @@ package searchindex_test import ( "context" "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -114,6 +115,10 @@ func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) { Config: 
configAdditional(projectID, indexName, databaseName, clusterName, ""), Check: checkAdditionalAnalyzers(projectID, indexName, databaseName, clusterName, false), }, + { + Config: configAdditional(projectID, indexName, databaseName, clusterName, incorrectFormatAnalyzersTF), + ExpectError: regexp.MustCompile("cannot unmarshal search index attribute `analyzers` because it has an incorrect format"), + }, }, }) } @@ -537,8 +542,9 @@ const ( with = true without = false - analyzersTF = "\nanalyzers = <<-EOF\n" + analyzersJSON + "\nEOF\n" - mappingsFieldsTF = "\nmappings_fields = <<-EOF\n" + mappingsFieldsJSON + "\nEOF\n" + analyzersTF = "\nanalyzers = <<-EOF\n" + analyzersJSON + "\nEOF\n" + incorrectFormatAnalyzersTF = "\nanalyzers = <<-EOF\n" + incorrectFormatAnalyzersJSON + "\nEOF\n" + mappingsFieldsTF = "\nmappings_fields = <<-EOF\n" + mappingsFieldsJSON + "\nEOF\n" analyzersJSON = ` [ @@ -609,15 +615,18 @@ const ( "similarity": "euclidean" }] ` - storedSourceIncludeJSON = ` - { - "include": ["include1","include2"] - } - ` - storedSourceExcludeJSON = ` - { - "exclude": ["exclude1", "exclude2"] - } + incorrectFormatAnalyzersJSON = ` + [ + { + "wrongField":[ + { + "type":"length", + "min":20, + "max":33 + } + ] + } + ] ` ) diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index c581f88464..286fee15e3 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -6,12 +6,14 @@ import ( "testing" "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) type ClusterRequest struct { - ProviderName string - ExtraConfig string ResourceDependencyName string + ClusterNameExplicit string + ReplicationSpecs []ReplicationSpecRequest + DiskSizeGb int CloudBackup bool Geosharded bool } @@ -20,6 +22,7 @@ type ClusterInfo struct { ProjectIDStr string ProjectID string ClusterName string + ClusterResourceName string ClusterNameStr string ClusterTerraformStr string } @@ -32,9 +35,6 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { if req == nil { req = new(ClusterRequest) } - if req.ProviderName == "" { - req.ProviderName = constant.AWS - } clusterName := os.Getenv("MONGODB_ATLAS_CLUSTER_NAME") projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") if clusterName != "" && projectID != "" { @@ -47,48 +47,17 @@ func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo { } } projectID = ProjectIDExecution(tb) - clusterName = RandomClusterName() - clusterTypeStr := "REPLICASET" - if req.Geosharded { - clusterTypeStr = "GEOSHARDED" - } - dependsOnClause := "" - if req.ResourceDependencyName != "" { - dependsOnClause = fmt.Sprintf(` - depends_on = [ - %[1]s - ] - `, req.ResourceDependencyName) + clusterTerraformStr, clusterName, err := ClusterResourceHcl(projectID, req) + if err != nil { + tb.Error(err) } - clusterTerraformStr := fmt.Sprintf(` - resource "mongodbatlas_cluster" "test_cluster" { - project_id = %[1]q - name = %[2]q - cloud_backup = %[3]t - auto_scaling_disk_gb_enabled = false - provider_name = %[4]q - provider_instance_size_name = "M10" - - cluster_type = %[5]q - replication_specs { - num_shards = 1 - zone_name = "Zone 1" - regions_config { - region_name = "US_WEST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - %[6]s - %[7]s - } - `, projectID, clusterName, req.CloudBackup, req.ProviderName, clusterTypeStr, req.ExtraConfig, dependsOnClause) + clusterResourceName := "mongodbatlas_advanced_cluster.cluster_info" return ClusterInfo{ 
ProjectIDStr: fmt.Sprintf("%q", projectID), ProjectID: projectID, ClusterName: clusterName, - ClusterNameStr: "mongodbatlas_cluster.test_cluster.name", + ClusterNameStr: fmt.Sprintf("%s.name", clusterResourceName), + ClusterResourceName: clusterResourceName, ClusterTerraformStr: clusterTerraformStr, } } @@ -98,3 +67,64 @@ func ExistingClusterUsed() bool { projectID := os.Getenv("MONGODB_ATLAS_PROJECT_ID") return clusterName != "" && projectID != "" } + +type ReplicationSpecRequest struct { + ZoneName string + Region string + InstanceSize string + ProviderName string + ExtraRegionConfigs []ReplicationSpecRequest + NodeCount int +} + +func (r *ReplicationSpecRequest) AddDefaults() { + if r.NodeCount == 0 { + r.NodeCount = 3 + } + if r.ZoneName == "" { + r.ZoneName = "Zone 1" + } + if r.Region == "" { + r.Region = "US_WEST_2" + } + if r.InstanceSize == "" { + r.InstanceSize = "M10" + } + if r.ProviderName == "" { + r.ProviderName = constant.AWS + } +} + +func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig { + config := CloudRegionConfig(*r) + configs := []admin.CloudRegionConfig{config} + for _, extra := range r.ExtraRegionConfigs { + configs = append(configs, CloudRegionConfig(extra)) + } + return configs +} + +func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { + if req == nil { + req = new(ReplicationSpecRequest) + } + req.AddDefaults() + defaultNumShards := 1 + regionConfigs := req.AllRegionConfigs() + return admin.ReplicationSpec{ + NumShards: &defaultNumShards, + ZoneName: &req.ZoneName, + RegionConfigs: ®ionConfigs, + } +} + +func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { + return admin.CloudRegionConfig{ + RegionName: &req.Region, + ProviderName: &req.ProviderName, + ElectableSpecs: &admin.HardwareSpec{ + InstanceSize: &req.InstanceSize, + NodeCount: &req.NodeCount, + }, + } +} diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go index 93b9e40ced..6385fc9182 100644 --- a/internal/testutil/acc/config_formatter.go +++ b/internal/testutil/acc/config_formatter.go @@ -1,9 +1,17 @@ package acc import ( + "encoding/json" "fmt" + "regexp" "sort" "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + "go.mongodb.org/atlas-sdk/v20240530002/admin" ) func FormatToHCLMap(m map[string]string, indent, varName string) string { @@ -41,7 +49,6 @@ func FormatToHCLLifecycleIgnore(keys ...string) string { return strings.Join(lines, "\n") } -// make test deterministic func sortStringMapKeys(m map[string]string) []string { keys := make([]string, 0, len(m)) for k := range m { @@ -50,3 +57,184 @@ func sortStringMapKeys(m map[string]string) []string { sort.Strings(keys) return keys } +func sortStringMapKeysAny(m map[string]any) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)") +var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") + +func ToSnakeCase(str string) string { + snake := matchFirstCap.ReplaceAllString(str, "${1}_${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}") + return strings.ToLower(snake) +} + +func ClusterResourceHcl(projectID string, req *ClusterRequest) (configStr, clusterName string, err error) { + if req == nil { + req = new(ClusterRequest) + } + specRequests := req.ReplicationSpecs + if 
len(specRequests) == 0 {
+		specRequests = append(specRequests, ReplicationSpecRequest{})
+	}
+	specs := make([]admin.ReplicationSpec, len(specRequests))
+	for i, specRequest := range specRequests {
+		specs[i] = ReplicationSpec(&specRequest)
+	}
+	clusterName = req.ClusterNameExplicit
+	if clusterName == "" {
+		clusterName = RandomClusterName()
+	}
+	clusterTypeStr := "REPLICASET"
+	if req.Geosharded {
+		clusterTypeStr = "GEOSHARDED"
+	}
+
+	f := hclwrite.NewEmptyFile()
+	root := f.Body()
+	cluster := root.AppendNewBlock("resource", []string{"mongodbatlas_advanced_cluster", "cluster_info"}).Body()
+	clusterRootAttributes := map[string]any{
+		"project_id":     projectID,
+		"cluster_type":   clusterTypeStr,
+		"name":           clusterName,
+		"backup_enabled": req.CloudBackup,
+	}
+	if req.DiskSizeGb != 0 {
+		clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb
+	}
+	addPrimitiveAttributes(cluster, clusterRootAttributes)
+	cluster.AppendNewline()
+	for i, spec := range specs {
+		err = writeReplicationSpec(cluster, spec)
+		if err != nil {
+			return "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err)
+		}
+	}
+	cluster.AppendNewline()
+	if req.ResourceDependencyName != "" {
+		if !strings.Contains(req.ResourceDependencyName, ".") {
+			return "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'")
+		}
+		err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName))
+		if err != nil {
+			return "", "", err
+		}
+	}
+	return "\n" + string(f.Bytes()), clusterName, err
+}
+
+func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error {
+	replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body()
+	err := addPrimitiveAttributesViaJSON(replicationBlock, spec)
+	if err != nil {
+		return err
+	}
+	for _, rc := range spec.GetRegionConfigs() {
+		if rc.Priority == nil {
+			rc.SetPriority(7)
+		}
+		replicationBlock.AppendNewline()
+		rcBlock := replicationBlock.AppendNewBlock("region_configs", nil).Body()
+		err = addPrimitiveAttributesViaJSON(rcBlock, rc)
+		if err != nil {
+			return err
+		}
+		autoScalingBlock := rcBlock.AppendNewBlock("auto_scaling", nil).Body()
+		if rc.AutoScaling == nil {
+			autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false))
+		} else {
+			autoScaling := rc.GetAutoScaling()
+			return fmt.Errorf("auto_scaling on replication spec is not supported yet %v", autoScaling)
+		}
+		nodeSpec := rc.GetElectableSpecs()
+		nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body()
+		err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec)
+	}
+	return err
+}
+
+// addPrimitiveAttributesViaJSON adds "primitive" bool/string/int/float attributes of a struct.
+func addPrimitiveAttributesViaJSON(b *hclwrite.Body, obj any) error { + var objMap map[string]any + inrec, err := json.Marshal(obj) + if err != nil { + return err + } + err = json.Unmarshal(inrec, &objMap) + if err != nil { + return err + } + addPrimitiveAttributes(b, objMap) + return nil +} + +func addPrimitiveAttributes(b *hclwrite.Body, values map[string]any) { + for _, keyCamel := range sortStringMapKeysAny(values) { + key := ToSnakeCase(keyCamel) + value := values[keyCamel] + switch value := value.(type) { + case bool: + b.SetAttributeValue(key, cty.BoolVal(value)) + case string: + if value != "" { + b.SetAttributeValue(key, cty.StringVal(value)) + } + case int: + b.SetAttributeValue(key, cty.NumberIntVal(int64(value))) + // int gets parsed as float64 for json + case float64: + b.SetAttributeValue(key, cty.NumberIntVal(int64(value))) + default: + continue + } + } +} + +// Sometimes it is easier to set a value using hcl/tf syntax instead of creating complex values like list hcl.Traversal. +func setAttributeHcl(body *hclwrite.Body, tfExpression string) error { + src := []byte(tfExpression) + + f, diags := hclwrite.ParseConfig(src, "", hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return fmt.Errorf("extract attribute error %s\nparsing %s", diags, tfExpression) + } + expressionAttributes := f.Body().Attributes() + if len(expressionAttributes) != 1 { + return fmt.Errorf("must be a single attribute in expression: %s", tfExpression) + } + tokens := hclwrite.Tokens{} + for _, attr := range expressionAttributes { + tokens = attr.BuildTokens(tokens) + } + if len(tokens) == 0 { + return fmt.Errorf("no tokens found for expression %s", tfExpression) + } + var attributeName string + valueTokens := []*hclwrite.Token{} + equalFound := false + for _, token := range tokens { + if attributeName == "" && token.Type == hclsyntax.TokenIdent { + attributeName = string(token.Bytes) + } + if equalFound { + valueTokens = append(valueTokens, token) + } + if token.Type == hclsyntax.TokenEqual { + equalFound = true + } + } + if attributeName == "" { + return fmt.Errorf("unable to find the attribute name set for expr=%s", tfExpression) + } + if len(valueTokens) == 0 { + return fmt.Errorf("unable to find the attribute value set for expr=%s", tfExpression) + } + body.SetAttributeRaw(attributeName, valueTokens) + return nil +} diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go index 16ac5ef7f8..263f22ce9f 100644 --- a/internal/testutil/acc/config_formatter_test.go +++ b/internal/testutil/acc/config_formatter_test.go @@ -4,8 +4,10 @@ import ( "fmt" "testing" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func projectTemplateWithExtra(extra string) string { @@ -104,3 +106,255 @@ func TestFormatToHCLLifecycleIgnore(t *testing.T) { }) } } + +var standardClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var overrideClusterResource = ` +resource 
"mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = true + cluster_type = "GEOSHARDED" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone X" + + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "MY_REGION_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M30" + node_count = 30 + } + } + } + +} +` + +var dependsOnClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_project.project_execution] +} +` +var dependsOnMultiResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple] +} +` +var twoReplicationSpecs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + replication_specs { + num_shards = 1 + zone_name = "Zone 2" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var twoRegionConfigs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` + +func Test_ClusterResourceHcl(t *testing.T) { + var ( + clusterName = "my-name" + testCases = map[string]struct { + expected string + req acc.ClusterRequest + }{ + "defaults": { + standardClusterResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName}, + }, + "dependsOn": { + dependsOnClusterResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, + }, + "dependsOnMulti": { + dependsOnMultiResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ResourceDependencyName: 
"mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, + }, + "twoReplicationSpecs": { + twoReplicationSpecs, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_1", ZoneName: "Zone 1"}, + {Region: "EU_WEST_2", ZoneName: "Zone 2"}, + }}, + }, + "overrideClusterResource": { + overrideClusterResource, + acc.ClusterRequest{ClusterNameExplicit: clusterName, Geosharded: true, CloudBackup: true, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE}, + }}, + }, + "twoRegionConfigs": { + twoRegionConfigs, + acc.ClusterRequest{ClusterNameExplicit: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + { + Region: "US_WEST_1", + InstanceSize: "M10", + NodeCount: 3, + ExtraRegionConfigs: []acc.ReplicationSpecRequest{{Region: "EU_WEST_1", InstanceSize: "M10", NodeCount: 3, ProviderName: constant.AWS}}, + }, + }, + }, + }, + } + ) + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + config, actualClusterName, err := acc.ClusterResourceHcl("project", &tc.req) + require.NoError(t, err) + assert.Equal(t, clusterName, actualClusterName) + assert.Equal(t, tc.expected, config) + }) + } +}