azurerm_cosmosdb_mongo_collection: add analytical_storage_ttl arg…
favoretti committed May 26, 2021
1 parent 0b06c82 commit 5f555f4
Showing 5 changed files with 98 additions and 2 deletions.
36 changes: 36 additions & 0 deletions azurerm/internal/services/cosmos/cosmosdb_account_resource_test.go
@@ -1603,6 +1603,42 @@ resource "azurerm_cosmosdb_account" "test" {
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, string(kind), string(consistency))
}

func (CosmosDBAccountResource) mongoAnalyticalStorage(data acceptance.TestData, consistency documentdb.DefaultConsistencyLevel) string {
return fmt.Sprintf(`
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "test" {
name = "acctestRG-cosmos-%d"
location = "%s"
}
resource "azurerm_cosmosdb_account" "test" {
name = "acctest-ca-%d"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
offer_type = "Standard"
kind = "MongoDB"
analytical_storage_enabled = true
consistency_policy {
consistency_level = "%s"
}
capabilities {
name = "EnableMongo"
}
geo_location {
location = azurerm_resource_group.test.location
failover_priority = 0
}
}
`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, string(consistency))
}

func checkAccCosmosDBAccount_basic(data acceptance.TestData, consistency documentdb.DefaultConsistencyLevel, locationCount int) resource.TestCheckFunc {
    return resource.ComposeTestCheckFunc(
        check.That(data.ResourceName).Key("name").Exists(),
azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource.go
@@ -83,6 +83,12 @@ func resourceCosmosDbMongoCollection() *schema.Resource {
                ValidateFunc: validation.IntAtLeast(-1),
            },

            "analytical_storage_ttl": {
                Type:         schema.TypeInt,
                Optional:     true,
                ValidateFunc: validation.IntAtLeast(-1),
            },

            "throughput": {
                Type:     schema.TypeInt,
                Optional: true,
@@ -172,6 +178,10 @@ func resourceCosmosDbMongoCollectionCreate(d *schema.ResourceData, meta interface{}) error {
        },
    }

    if analyticalStorageTTL, ok := d.GetOk("analytical_storage_ttl"); ok {
        db.MongoDBCollectionCreateUpdateProperties.Resource.AnalyticalStorageTTL = utils.Int32(int32(analyticalStorageTTL.(int)))
    }

    if throughput, hasThroughput := d.GetOk("throughput"); hasThroughput {
        if throughput != 0 {
            db.MongoDBCollectionCreateUpdateProperties.Options.Throughput = common.ConvertThroughputFromResourceData(throughput)
@@ -241,6 +251,10 @@ func resourceCosmosDbMongoCollectionUpdate(d *schema.ResourceData, meta interface{}) error {
        },
    }

    if analyticalStorageTTL, ok := d.GetOk("analytical_storage_ttl"); ok {
        db.MongoDBCollectionCreateUpdateProperties.Resource.AnalyticalStorageTTL = utils.Int32(int32(analyticalStorageTTL.(int)))
    }

    if shardKey := d.Get("shard_key").(string); shardKey != "" {
        db.MongoDBCollectionCreateUpdateProperties.Resource.ShardKey = map[string]*string{
            shardKey: utils.String("Hash"), // looks like only hash is supported for now
@@ -337,6 +351,8 @@ func resourceCosmosDbMongoCollectionRead(d *schema.ResourceData, meta interface{}) error {
            if err := d.Set("system_indexes", systemIndexes); err != nil {
                return fmt.Errorf("failed to set `system_indexes`: %+v", err)
            }

            d.Set("analytical_storage_ttl", res.AnalyticalStorageTTL)
        }
    }

azurerm/internal/services/cosmos/cosmosdb_mongo_collection_resource_test.go
@@ -129,6 +129,22 @@ func TestAccCosmosDbMongoCollection_withIndex(t *testing.T) {
    })
}

func TestAccCosmosDbMongoCollection_analyticalStorageTTL(t *testing.T) {
    data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test")
    r := CosmosMongoCollectionResource{}

    data.ResourceTest(t, r, []resource.TestStep{
        {
            Config: r.analyticalStorageTTL(data),
            Check: resource.ComposeAggregateTestCheckFunc(
                check.That(data.ResourceName).ExistsInAzure(r),
                check.That(data.ResourceName).Key("analytical_storage_ttl").HasValue("600"),
            ),
        },
        data.ImportStep(),
    })
}

func TestAccCosmosDbMongoCollection_autoscale(t *testing.T) {
    data := acceptance.BuildTestData(t, "azurerm_cosmosdb_mongo_collection", "test")
    r := CosmosMongoCollectionResource{}
@@ -389,3 +405,29 @@ resource "azurerm_cosmosdb_mongo_collection" "test" {
}
`, CosmosDBAccountResource{}.capabilities(data, documentdb.MongoDB, []string{"EnableMongo", "EnableServerless"}), data.RandomInteger)
}

func (CosmosMongoCollectionResource) analyticalStorageTTL(data acceptance.TestData) string {
return fmt.Sprintf(`
%[1]s
resource "azurerm_cosmosdb_mongo_database" "test" {
name = "acctest-%[2]d"
resource_group_name = azurerm_cosmosdb_account.test.resource_group_name
account_name = azurerm_cosmosdb_account.test.name
}
resource "azurerm_cosmosdb_mongo_collection" "test" {
name = "acctest-%[2]d"
resource_group_name = azurerm_cosmosdb_mongo_database.test.resource_group_name
account_name = azurerm_cosmosdb_mongo_database.test.account_name
database_name = azurerm_cosmosdb_mongo_database.test.name
index {
keys = ["_id"]
unique = true
}
analytical_storage_ttl = 600
}
`, CosmosDBAccountResource{}.mongoAnalyticalStorage(data, documentdb.Eventual), data.RandomInteger, data.RandomInteger)
}
azurerm/internal/services/cosmos/cosmosdb_sql_container_resource.go
@@ -94,8 +94,9 @@ func resourceCosmosDbSQLContainer() *schema.Resource {
"autoscale_settings": common.DatabaseAutoscaleSettingsSchema(),

"analytical_storage_ttl": {
Type: schema.TypeInt,
Optional: true,
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(-1),
},

"default_ttl": {
1 change: 1 addition & 0 deletions website/docs/r/cosmosdb_mongo_collection.html.markdown
@@ -45,6 +45,7 @@ The following arguments are supported:
* `database_name` - (Required) The name of the Cosmos DB Mongo Database in which the Cosmos DB Mongo Collection is created. Changing this forces a new resource to be created.
* `default_ttl_seconds` - (Required) The default Time To Live in seconds. If the value is `-1` or `0`, items are not automatically expired.
* `shard_key` - (Required) The name of the key to partition on for sharding. There must not be any other unique index keys.
* `analytical_storage_ttl` - (Optional) The default Time To Live of the Analytical Storage for this Mongo Collection. If set to `-1`, items in the analytical store never expire by default; if set to a number `n`, items expire `n` seconds after their last modified time (see the sketch after this argument list).
* `index` - (Optional) One or more `index` blocks as defined below.
* `throughput` - (Optional) The throughput of the MongoDB collection (RU/s). Must be set in increments of `100`. The minimum value is `400`. This must be set upon database creation otherwise it cannot be updated without a manual terraform destroy-apply.
* `autoscale_settings` - (Optional) An `autoscale_settings` block as defined below. This must be set upon database creation otherwise it cannot be updated without a manual terraform destroy-apply. Requires `shard_key` to be set.
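For illustration, a minimal configuration sketch using the new argument. The `example` resource names and the `3600` value are placeholders, not taken from this commit, and the sketch assumes an `azurerm_cosmosdb_account` with `analytical_storage_enabled = true` plus an `azurerm_cosmosdb_mongo_database` defined elsewhere in the configuration, as in the test fixture above:

    resource "azurerm_cosmosdb_mongo_collection" "example" {
      name                = "example-collection"
      resource_group_name = azurerm_cosmosdb_account.example.resource_group_name
      account_name        = azurerm_cosmosdb_account.example.name
      database_name       = azurerm_cosmosdb_mongo_database.example.name

      index {
        keys   = ["_id"]
        unique = true
      }

      # Expire analytical-store items one hour after their last modified time;
      # set to -1 to keep them indefinitely.
      analytical_storage_ttl = 3600
    }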
