From 8feb6f259c324e3b12684a965376a90ccb730370 Mon Sep 17 00:00:00 2001
From: Pierre-Emmanuel MERCIER
Date: Wed, 9 Oct 2024 08:29:29 +0200
Subject: [PATCH] feat: add `azurerm_stack_hci_virtual_hard_disk` resource and container app max inactive revisions
---
CHANGELOG.md | 4 +
.../services/azurestackhci/registration.go | 1 +
.../stack_hci_virtual_hard_disk_resource.go | 318 ++++++++++++++++++
...ack_hci_virtual_hard_disk_resource_test.go | 271 +++++++++++++++
.../container_app_data_source.go | 7 +
.../containerapps/container_app_resource.go | 27 +-
.../container_app_resource_test.go | 76 +++++
website/docs/d/container_app.html.markdown | 2 +
website/docs/r/container_app.html.markdown | 3 +
.../stack_hci_virtual_hard_disk.html.markdown | 100 ++++++
10 files changed, 802 insertions(+), 7 deletions(-)
create mode 100644 internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource.go
create mode 100644 internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource_test.go
create mode 100644 website/docs/r/stack_hci_virtual_hard_disk.html.markdown
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 525471da2a445..b010ead1bccd3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
## 4.5.0 (Unreleased)
+ENHANCEMENTS:
+
+* **New Resource**: `azurerm_stack_hci_virtual_hard_disk` [GH-27474]
+
BUG FIXES:
* `azurerm_mssql_database` - now creates successfully when elastic pool is hyperscale [GH-27505]
diff --git a/internal/services/azurestackhci/registration.go b/internal/services/azurestackhci/registration.go
index 188d28b0e618a..bc417dbaf9e18 100644
--- a/internal/services/azurestackhci/registration.go
+++ b/internal/services/azurestackhci/registration.go
@@ -54,5 +54,6 @@ func (r Registration) Resources() []sdk.Resource {
StackHCIDeploymentSettingResource{},
StackHCILogicalNetworkResource{},
StackHCIStoragePathResource{},
+ StackHCIVirtualHardDiskResource{},
}
}
diff --git a/internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource.go b/internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource.go
new file mode 100644
index 0000000000000..1aee9a66396a7
--- /dev/null
+++ b/internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource.go
@@ -0,0 +1,318 @@
+package azurestackhci
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "time"
+
+ "github.com/hashicorp/go-azure-helpers/lang/pointer"
+ "github.com/hashicorp/go-azure-helpers/lang/response"
+ "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema"
+ "github.com/hashicorp/go-azure-helpers/resourcemanager/location"
+ "github.com/hashicorp/go-azure-helpers/resourcemanager/tags"
+ "github.com/hashicorp/go-azure-sdk/resource-manager/azurestackhci/2024-01-01/storagecontainers"
+ "github.com/hashicorp/go-azure-sdk/resource-manager/azurestackhci/2024-01-01/virtualharddisks"
+ "github.com/hashicorp/go-azure-sdk/resource-manager/extendedlocation/2021-08-15/customlocations"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/sdk"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation"
+)
+
+var (
+ _ sdk.Resource = StackHCIVirtualHardDiskResource{}
+ _ sdk.ResourceWithUpdate = StackHCIVirtualHardDiskResource{}
+)
+
+type StackHCIVirtualHardDiskResource struct{}
+
+func (StackHCIVirtualHardDiskResource) IDValidationFunc() pluginsdk.SchemaValidateFunc {
+ return virtualharddisks.ValidateVirtualHardDiskID
+}
+
+func (StackHCIVirtualHardDiskResource) ResourceType() string {
+ return "azurerm_stack_hci_virtual_hard_disk"
+}
+
+func (StackHCIVirtualHardDiskResource) ModelObject() interface{} {
+ return &StackHCIVirtualHardDiskResourceModel{}
+}
+
+type StackHCIVirtualHardDiskResourceModel struct {
+ Name string `tfschema:"name"`
+ ResourceGroupName string `tfschema:"resource_group_name"`
+ Location string `tfschema:"location"`
+ CustomLocationId string `tfschema:"custom_location_id"`
+ BlockSizeInBytes int64 `tfschema:"block_size_in_bytes"`
+ DiskFileFormat string `tfschema:"disk_file_format"`
+ DiskSizeInGB int64 `tfschema:"disk_size_in_gb"`
+ DynamicEnabled bool `tfschema:"dynamic_enabled"`
+ HypervGeneration string `tfschema:"hyperv_generation"`
+ LogicalSectorInBytes int64 `tfschema:"logical_sector_in_bytes"`
+ PhysicalSectorInBytes int64 `tfschema:"physical_sector_in_bytes"`
+ StoragePathId string `tfschema:"storage_path_id"`
+ Tags map[string]interface{} `tfschema:"tags"`
+}
+
+func (StackHCIVirtualHardDiskResource) Arguments() map[string]*pluginsdk.Schema {
+ return map[string]*pluginsdk.Schema{
+ "name": {
+ Type: pluginsdk.TypeString,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validation.StringMatch(
+ regexp.MustCompile(`^[a-zA-Z0-9][-._a-zA-Z0-9]{0,62}[a-zA-Z0-9]$`),
+ "name must be between 2 and 64 characters and can only contain alphanumberic characters, hyphen, dot and underline",
+ ),
+ },
+
+ "resource_group_name": commonschema.ResourceGroupName(),
+
+ "location": commonschema.Location(),
+
+ "custom_location_id": commonschema.ResourceIDReferenceRequiredForceNew(&customlocations.CustomLocationId{}),
+
+ "disk_size_in_gb": {
+ Type: pluginsdk.TypeInt,
+ Required: true,
+ ForceNew: true,
+ ValidateFunc: validation.IntAtLeast(1),
+ },
+
+ "block_size_in_bytes": {
+ Type: pluginsdk.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: validation.IntAtLeast(1),
+ },
+
+ "disk_file_format": {
+ Type: pluginsdk.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(virtualharddisks.DiskFileFormatVhd),
+ string(virtualharddisks.DiskFileFormatVhdx),
+ }, false),
+ },
+
+ "dynamic_enabled": {
+ Type: pluginsdk.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ Default: false,
+ },
+
+ "hyperv_generation": {
+ Type: pluginsdk.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: validation.StringInSlice([]string{
+ string(virtualharddisks.HyperVGenerationVOne),
+ string(virtualharddisks.HyperVGenerationVTwo),
+ }, false),
+ },
+
+ "logical_sector_in_bytes": {
+ Type: pluginsdk.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: validation.IntAtLeast(1),
+ },
+
+ "physical_sector_in_bytes": {
+ Type: pluginsdk.TypeInt,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: validation.IntAtLeast(1),
+ },
+
+ "storage_path_id": {
+ Type: pluginsdk.TypeString,
+ Optional: true,
+ ForceNew: true,
+ ValidateFunc: storagecontainers.ValidateStorageContainerID,
+ },
+
+ "tags": commonschema.Tags(),
+ }
+}
+
+func (StackHCIVirtualHardDiskResource) Attributes() map[string]*pluginsdk.Schema {
+ return map[string]*pluginsdk.Schema{}
+}
+
+func (r StackHCIVirtualHardDiskResource) Create() sdk.ResourceFunc {
+ return sdk.ResourceFunc{
+ Timeout: 30 * time.Minute,
+ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+ client := metadata.Client.AzureStackHCI.VirtualHardDisks
+
+ var config StackHCIVirtualHardDiskResourceModel
+ if err := metadata.Decode(&config); err != nil {
+ return fmt.Errorf("decoding: %+v", err)
+ }
+
+ subscriptionId := metadata.Client.Account.SubscriptionId
+ id := virtualharddisks.NewVirtualHardDiskID(subscriptionId, config.ResourceGroupName, config.Name)
+
+ existing, err := client.Get(ctx, id)
+ if err != nil && !response.WasNotFound(existing.HttpResponse) {
+ return fmt.Errorf("checking for presence of existing %s: %+v", id, err)
+ }
+ if !response.WasNotFound(existing.HttpResponse) {
+ return metadata.ResourceRequiresImport(r.ResourceType(), id)
+ }
+
+ payload := virtualharddisks.VirtualHardDisks{
+ Name: pointer.To(config.Name),
+ Location: location.Normalize(config.Location),
+ Tags: tags.Expand(config.Tags),
+ ExtendedLocation: &virtualharddisks.ExtendedLocation{
+ Name: pointer.To(config.CustomLocationId),
+ Type: pointer.To(virtualharddisks.ExtendedLocationTypesCustomLocation),
+ },
+ Properties: &virtualharddisks.VirtualHardDiskProperties{
+ Dynamic: pointer.To(config.DynamicEnabled),
+ DiskSizeGB: pointer.To(config.DiskSizeInGB),
+ },
+ }
+
+ if config.BlockSizeInBytes != 0 {
+ payload.Properties.BlockSizeBytes = pointer.To(config.BlockSizeInBytes)
+ }
+
+ if config.StoragePathId != "" {
+ payload.Properties.ContainerId = pointer.To(config.StoragePathId)
+ }
+
+ if config.DiskFileFormat != "" {
+ payload.Properties.DiskFileFormat = pointer.To(virtualharddisks.DiskFileFormat(config.DiskFileFormat))
+ }
+
+ if config.HypervGeneration != "" {
+ payload.Properties.HyperVGeneration = pointer.To(virtualharddisks.HyperVGeneration(config.HypervGeneration))
+ }
+
+ if config.LogicalSectorInBytes != 0 {
+ payload.Properties.LogicalSectorBytes = pointer.To(config.LogicalSectorInBytes)
+ }
+
+ if config.PhysicalSectorInBytes != 0 {
+ payload.Properties.PhysicalSectorBytes = pointer.To(config.PhysicalSectorInBytes)
+ }
+
+ if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil {
+ return fmt.Errorf("performing create %s: %+v", id, err)
+ }
+
+ metadata.SetID(id)
+
+ return nil
+ },
+ }
+}
+
+func (r StackHCIVirtualHardDiskResource) Read() sdk.ResourceFunc {
+ return sdk.ResourceFunc{
+ Timeout: 5 * time.Minute,
+ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+ client := metadata.Client.AzureStackHCI.VirtualHardDisks
+
+ id, err := virtualharddisks.ParseVirtualHardDiskID(metadata.ResourceData.Id())
+ if err != nil {
+ return err
+ }
+
+ resp, err := client.Get(ctx, *id)
+ if err != nil {
+ if response.WasNotFound(resp.HttpResponse) {
+ return metadata.MarkAsGone(id)
+ }
+
+ return fmt.Errorf("retrieving %s: %+v", id, err)
+ }
+
+ schema := StackHCIVirtualHardDiskResourceModel{
+ Name: id.VirtualHardDiskName,
+ ResourceGroupName: id.ResourceGroupName,
+ }
+
+ if model := resp.Model; model != nil {
+ schema.Location = location.Normalize(model.Location)
+ schema.Tags = tags.Flatten(model.Tags)
+
+ if model.ExtendedLocation != nil && model.ExtendedLocation.Name != nil {
+ customLocationId, err := customlocations.ParseCustomLocationIDInsensitively(*model.ExtendedLocation.Name)
+ if err != nil {
+ return err
+ }
+
+ schema.CustomLocationId = customLocationId.ID()
+ }
+
+ if props := model.Properties; props != nil {
+ schema.BlockSizeInBytes = pointer.From(props.BlockSizeBytes)
+ schema.DiskFileFormat = string(pointer.From(props.DiskFileFormat))
+ schema.DiskSizeInGB = pointer.From(props.DiskSizeGB)
+ schema.DynamicEnabled = pointer.From(props.Dynamic)
+ schema.HypervGeneration = string(pointer.From(props.HyperVGeneration))
+ schema.LogicalSectorInBytes = pointer.From(props.LogicalSectorBytes)
+ schema.PhysicalSectorInBytes = pointer.From(props.PhysicalSectorBytes)
+ schema.StoragePathId = pointer.From(props.ContainerId)
+ }
+ }
+
+ return metadata.Encode(&schema)
+ },
+ }
+}
+
+func (r StackHCIVirtualHardDiskResource) Update() sdk.ResourceFunc {
+ return sdk.ResourceFunc{
+ Timeout: 30 * time.Minute,
+ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+ client := metadata.Client.AzureStackHCI.VirtualHardDisks
+
+ id, err := virtualharddisks.ParseVirtualHardDiskID(metadata.ResourceData.Id())
+ if err != nil {
+ return err
+ }
+
+ var model StackHCIVirtualHardDiskResourceModel
+ if err := metadata.Decode(&model); err != nil {
+ return fmt.Errorf("decoding: %+v", err)
+ }
+
+ parameters := virtualharddisks.VirtualHardDisksUpdateRequest{}
+ if metadata.ResourceData.HasChange("tags") {
+ parameters.Tags = tags.Expand(model.Tags)
+ }
+
+ if err := client.UpdateThenPoll(ctx, *id, parameters); err != nil {
+ return fmt.Errorf("updating %s: %+v", id, err)
+ }
+ return nil
+ },
+ }
+}
+
+func (r StackHCIVirtualHardDiskResource) Delete() sdk.ResourceFunc {
+ return sdk.ResourceFunc{
+ Timeout: 30 * time.Minute,
+ Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error {
+ client := metadata.Client.AzureStackHCI.VirtualHardDisks
+
+ id, err := virtualharddisks.ParseVirtualHardDiskID(metadata.ResourceData.Id())
+ if err != nil {
+ return err
+ }
+
+ if err := client.DeleteThenPoll(ctx, *id); err != nil {
+ return fmt.Errorf("deleting %s: %+v", id, err)
+ }
+
+ return nil
+ },
+ }
+}
diff --git a/internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource_test.go b/internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource_test.go
new file mode 100644
index 0000000000000..aaa4d1bf9bb0e
--- /dev/null
+++ b/internal/services/azurestackhci/stack_hci_virtual_hard_disk_resource_test.go
@@ -0,0 +1,271 @@
+package azurestackhci_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/go-azure-helpers/lang/pointer"
+ "github.com/hashicorp/go-azure-sdk/resource-manager/azurestackhci/2024-01-01/virtualharddisks"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/clients"
+ "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk"
+)
+
+type StackHCIVirtualHardDiskResource struct{}
+
+func TestAccStackHCIVirtualHardDisk_basic(t *testing.T) {
+ if os.Getenv(customLocationIdEnv) == "" {
+ t.Skipf("skipping since %q has not been specified", customLocationIdEnv)
+ }
+
+ data := acceptance.BuildTestData(t, "azurerm_stack_hci_virtual_hard_disk", "test")
+ r := StackHCIVirtualHardDiskResource{}
+
+ data.ResourceTest(t, r, []acceptance.TestStep{
+ {
+ Config: r.basic(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ })
+}
+
+func TestAccStackHCIVirtualHardDisk_complete(t *testing.T) {
+ if os.Getenv(customLocationIdEnv) == "" {
+ t.Skipf("skipping since %q has not been specified", customLocationIdEnv)
+ }
+
+ data := acceptance.BuildTestData(t, "azurerm_stack_hci_virtual_hard_disk", "test")
+ r := StackHCIVirtualHardDiskResource{}
+
+ data.ResourceTest(t, r, []acceptance.TestStep{
+ {
+ Config: r.complete(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ })
+}
+
+func TestAccStackHCIVirtualHardDisk_update(t *testing.T) {
+ if os.Getenv(customLocationIdEnv) == "" {
+ t.Skipf("skipping since %q has not been specified", customLocationIdEnv)
+ }
+
+ data := acceptance.BuildTestData(t, "azurerm_stack_hci_virtual_hard_disk", "test")
+ r := StackHCIVirtualHardDiskResource{}
+
+ data.ResourceTest(t, r, []acceptance.TestStep{
+ {
+ Config: r.basic(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ {
+ Config: r.update(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ {
+ Config: r.updateTag(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ {
+ Config: r.basic(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ })
+}
+
+func TestAccStackHCIVirtualHardDisk_requiresImport(t *testing.T) {
+ if os.Getenv(customLocationIdEnv) == "" {
+ t.Skipf("skipping since %q has not been specified", customLocationIdEnv)
+ }
+
+ data := acceptance.BuildTestData(t, "azurerm_stack_hci_virtual_hard_disk", "test")
+ r := StackHCIVirtualHardDiskResource{}
+
+ data.ResourceTest(t, r, []acceptance.TestStep{
+ {
+ Config: r.basic(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.RequiresImportErrorStep(r.requiresImport),
+ })
+}
+
+func (r StackHCIVirtualHardDiskResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) {
+ clusterClient := client.AzureStackHCI.VirtualHardDisks
+ id, err := virtualharddisks.ParseVirtualHardDiskID(state.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := clusterClient.Get(ctx, *id)
+ if err != nil {
+ return nil, fmt.Errorf("retrieving %s: %+v", *id, err)
+ }
+
+ return pointer.To(resp.Model != nil), nil
+}
+
+func (r StackHCIVirtualHardDiskResource) basic(data acceptance.TestData) string {
+ template := r.template(data)
+ return fmt.Sprintf(`
+%s
+
+provider "azurerm" {
+ features {}
+}
+
+resource "azurerm_stack_hci_virtual_hard_disk" "test" {
+ name = "acctest-vhd-%s"
+ resource_group_name = azurerm_resource_group.test.name
+ location = azurerm_resource_group.test.location
+ custom_location_id = %q
+ disk_size_in_gb = 2
+
+ lifecycle {
+ ignore_changes = [storage_path_id]
+ }
+}
+`, template, data.RandomString, os.Getenv(customLocationIdEnv))
+}
+
+func (r StackHCIVirtualHardDiskResource) update(data acceptance.TestData) string {
+ template := r.template(data)
+ return fmt.Sprintf(`
+%s
+
+provider "azurerm" {
+ features {}
+}
+
+resource "azurerm_stack_hci_virtual_hard_disk" "test" {
+ name = "acctest-vhd-%s"
+ resource_group_name = azurerm_resource_group.test.name
+ location = azurerm_resource_group.test.location
+ custom_location_id = %q
+ disk_size_in_gb = 2
+
+ tags = {
+ foo = "bar"
+ }
+
+ lifecycle {
+ ignore_changes = [storage_path_id]
+ }
+}
+`, template, data.RandomString, os.Getenv(customLocationIdEnv))
+}
+
+func (r StackHCIVirtualHardDiskResource) updateTag(data acceptance.TestData) string {
+ template := r.template(data)
+ return fmt.Sprintf(`
+%s
+
+provider "azurerm" {
+ features {}
+}
+
+resource "azurerm_stack_hci_virtual_hard_disk" "test" {
+ name = "acctest-vhd-%s"
+ resource_group_name = azurerm_resource_group.test.name
+ location = azurerm_resource_group.test.location
+ custom_location_id = %q
+ disk_size_in_gb = 2
+
+ tags = {
+ env = "test"
+ foo = "bar"
+ }
+
+ lifecycle {
+ ignore_changes = [storage_path_id]
+ }
+}
+`, template, data.RandomString, os.Getenv(customLocationIdEnv))
+}
+
+func (r StackHCIVirtualHardDiskResource) complete(data acceptance.TestData) string {
+ template := r.template(data)
+ return fmt.Sprintf(`
+%[1]s
+
+provider "azurerm" {
+ features {}
+}
+
+resource "azurerm_stack_hci_storage_path" "test" {
+ name = "acctest-sp-%[2]s"
+ resource_group_name = azurerm_resource_group.test.name
+ location = azurerm_resource_group.test.location
+ custom_location_id = %[3]q
+ path = "C:\\ClusterStorage\\UserStorage_2\\sp-%[2]s"
+}
+
+resource "azurerm_stack_hci_virtual_hard_disk" "test" {
+ name = "acctest-vhd-%[2]s"
+ resource_group_name = azurerm_resource_group.test.name
+ location = azurerm_resource_group.test.location
+ custom_location_id = %[3]q
+ disk_size_in_gb = 2
+ dynamic_enabled = false
+ hyperv_generation = "V2"
+ physical_sector_in_bytes = 4096
+ logical_sector_in_bytes = 512
+ block_size_in_bytes = 1024
+ disk_file_format = "vhdx"
+ storage_path_id = azurerm_stack_hci_storage_path.test.id
+
+ tags = {
+ foo = "bar"
+ env = "test"
+ }
+}
+`, template, data.RandomString, os.Getenv(customLocationIdEnv))
+}
+
+func (r StackHCIVirtualHardDiskResource) requiresImport(data acceptance.TestData) string {
+ config := r.basic(data)
+
+ return fmt.Sprintf(`
+%s
+
+resource "azurerm_stack_hci_virtual_hard_disk" "import" {
+ name = azurerm_stack_hci_virtual_hard_disk.test.name
+ resource_group_name = azurerm_stack_hci_virtual_hard_disk.test.resource_group_name
+ location = azurerm_stack_hci_virtual_hard_disk.test.location
+ custom_location_id = azurerm_stack_hci_virtual_hard_disk.test.custom_location_id
+ disk_size_in_gb = azurerm_stack_hci_virtual_hard_disk.test.disk_size_in_gb
+}
+`, config)
+}
+
+func (r StackHCIVirtualHardDiskResource) template(data acceptance.TestData) string {
+ return fmt.Sprintf(`
+resource "azurerm_resource_group" "test" {
+ name = "acctest-hci-vhd-%[2]s"
+ location = %[1]q
+}
+`, data.Locations.Primary, data.RandomString)
+}
diff --git a/internal/services/containerapps/container_app_data_source.go b/internal/services/containerapps/container_app_data_source.go
index 4334fda441cef..615cf64f7d5c2 100644
--- a/internal/services/containerapps/container_app_data_source.go
+++ b/internal/services/containerapps/container_app_data_source.go
@@ -30,6 +30,7 @@ type ContainerAppDataSourceModel struct {
ManagedEnvironmentId string `tfschema:"container_app_environment_id"`
Location string `tfschema:"location"`
RevisionMode string `tfschema:"revision_mode"`
+ MaxInactiveRevisions int64 `tfschema:"max_inactive_revisions"`
Ingress []helpers.Ingress `tfschema:"ingress"`
Registries []helpers.Registry `tfschema:"registry"`
Secrets []helpers.Secret `tfschema:"secret"`
@@ -117,6 +118,11 @@ func (r ContainerAppDataSource) Attributes() map[string]*pluginsdk.Schema {
Type: pluginsdk.TypeString,
Computed: true,
},
+
+ "max_inactive_revisions": {
+ Type: pluginsdk.TypeInt,
+ Computed: true,
+ },
}
}
@@ -172,6 +178,7 @@ func (r ContainerAppDataSource) Read() sdk.ResourceFunc {
containerApp.Ingress = helpers.FlattenContainerAppIngress(config.Ingress, id.ContainerAppName)
containerApp.Registries = helpers.FlattenContainerAppRegistries(config.Registries)
containerApp.Dapr = helpers.FlattenContainerAppDapr(config.Dapr)
+ containerApp.MaxInactiveRevisions = pointer.From(config.MaxInactiveRevisions)
}
}
containerApp.LatestRevisionName = pointer.From(props.LatestRevisionName)
diff --git a/internal/services/containerapps/container_app_resource.go b/internal/services/containerapps/container_app_resource.go
index d83027d24eea6..2757148bae7b6 100644
--- a/internal/services/containerapps/container_app_resource.go
+++ b/internal/services/containerapps/container_app_resource.go
@@ -39,9 +39,10 @@ type ContainerAppModel struct {
Dapr []helpers.Dapr `tfschema:"dapr"`
Template []helpers.ContainerTemplate `tfschema:"template"`
- Identity []identity.ModelSystemAssignedUserAssigned `tfschema:"identity"`
- WorkloadProfileName string `tfschema:"workload_profile_name"`
- Tags map[string]interface{} `tfschema:"tags"`
+ Identity []identity.ModelSystemAssignedUserAssigned `tfschema:"identity"`
+ WorkloadProfileName string `tfschema:"workload_profile_name"`
+ MaxInactiveRevisions int64 `tfschema:"max_inactive_revisions"`
+ Tags map[string]interface{} `tfschema:"tags"`
OutboundIpAddresses []string `tfschema:"outbound_ip_addresses"`
LatestRevisionName string `tfschema:"latest_revision_name"`
@@ -112,6 +113,12 @@ func (r ContainerAppResource) Arguments() map[string]*pluginsdk.Schema {
ValidateFunc: validation.StringIsNotEmpty,
},
+ "max_inactive_revisions": {
+ Type: pluginsdk.TypeInt,
+ Optional: true,
+ ValidateFunc: validation.IntBetween(0, 100),
+ },
+
"tags": commonschema.Tags(),
}
}
@@ -199,10 +206,11 @@ func (r ContainerAppResource) Create() sdk.ResourceFunc {
Location: location.Normalize(env.Model.Location),
Properties: &containerapps.ContainerAppProperties{
Configuration: &containerapps.Configuration{
- Ingress: helpers.ExpandContainerAppIngress(app.Ingress, id.ContainerAppName),
- Dapr: helpers.ExpandContainerAppDapr(app.Dapr),
- Secrets: secrets,
- Registries: registries,
+ Ingress: helpers.ExpandContainerAppIngress(app.Ingress, id.ContainerAppName),
+ Dapr: helpers.ExpandContainerAppDapr(app.Dapr),
+ Secrets: secrets,
+ Registries: registries,
+ MaxInactiveRevisions: pointer.To(app.MaxInactiveRevisions),
},
ManagedEnvironmentId: pointer.To(app.ManagedEnvironmentId),
Template: helpers.ExpandContainerAppTemplate(app.Template, metadata),
@@ -279,6 +287,7 @@ func (r ContainerAppResource) Read() sdk.ResourceFunc {
state.Ingress = helpers.FlattenContainerAppIngress(config.Ingress, id.ContainerAppName)
state.Registries = helpers.FlattenContainerAppRegistries(config.Registries)
state.Dapr = helpers.FlattenContainerAppDapr(config.Dapr)
+ state.MaxInactiveRevisions = pointer.From(config.MaxInactiveRevisions)
}
state.LatestRevisionName = pointer.From(props.LatestRevisionName)
state.LatestRevisionFqdn = pointer.From(props.LatestRevisionFqdn)
@@ -375,6 +384,10 @@ func (r ContainerAppResource) Update() sdk.ResourceFunc {
}
}
+ if metadata.ResourceData.HasChange("max_inactive_revisions") {
+ model.Properties.Configuration.MaxInactiveRevisions = pointer.To(state.MaxInactiveRevisions)
+ }
+
if metadata.ResourceData.HasChange("dapr") {
model.Properties.Configuration.Dapr = helpers.ExpandContainerAppDapr(state.Dapr)
diff --git a/internal/services/containerapps/container_app_resource_test.go b/internal/services/containerapps/container_app_resource_test.go
index b98919252059b..1958b721debee 100644
--- a/internal/services/containerapps/container_app_resource_test.go
+++ b/internal/services/containerapps/container_app_resource_test.go
@@ -576,6 +576,44 @@ func TestAccContainerAppResource_ingressTrafficValidation(t *testing.T) {
})
}
+func TestAccContainerAppResource_maxInactiveRevisionsValidation(t *testing.T) {
+ data := acceptance.BuildTestData(t, "azurerm_container_app", "test")
+ r := ContainerAppResource{}
+
+ data.ResourceTest(t, r, []acceptance.TestStep{
+ {
+ Config: r.maxInactiveRevisionsValidation(data, -1),
+ ExpectError: regexp.MustCompile(`expected max_inactive_revisions to be in the range \(0 - 100\)`),
+ },
+ {
+ Config: r.maxInactiveRevisionsValidation(data, 101),
+ ExpectError: regexp.MustCompile(`expected max_inactive_revisions to be in the range \(0 - 100\)`),
+ },
+ })
+}
+
+func TestAccContainerAppResource_maxInactiveRevisionsUpdate(t *testing.T) {
+ data := acceptance.BuildTestData(t, "azurerm_container_app", "test")
+ r := ContainerAppResource{}
+
+ data.ResourceTest(t, r, []acceptance.TestStep{
+ {
+ Config: r.basic(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ {
+ Config: r.maxInactiveRevisionsChange(data),
+ Check: acceptance.ComposeTestCheckFunc(
+ check.That(data.ResourceName).ExistsInAzure(r),
+ ),
+ },
+ data.ImportStep(),
+ })
+}
+
func (r ContainerAppResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) {
id, err := containerapps.ParseContainerAppID(state.ID)
if err != nil {
@@ -1304,6 +1342,7 @@ resource "azurerm_container_app" "test" {
resource_group_name = azurerm_resource_group.test.name
container_app_environment_id = azurerm_container_app_environment.test.id
revision_mode = "Single"
+ max_inactive_revisions = 25
template {
container {
@@ -2832,3 +2871,40 @@ traffic_weight {
}
`
}
+
+func (r ContainerAppResource) maxInactiveRevisionsChange(data acceptance.TestData) string {
+ return fmt.Sprintf(`
+%s
+
+resource "azurerm_container_app" "test" {
+ name = "acctest-capp-%[2]d"
+ resource_group_name = azurerm_resource_group.test.name
+ container_app_environment_id = azurerm_container_app_environment.test.id
+ revision_mode = "Single"
+ max_inactive_revisions = 50
+
+ template {
+ container {
+ name = "acctest-cont-%[2]d"
+ image = "jackofallops/azure-containerapps-python-acctest:v0.0.1"
+ cpu = 0.25
+ memory = "0.5Gi"
+ }
+ }
+}
+`, r.template(data), data.RandomInteger)
+}
+
+func (r ContainerAppResource) maxInactiveRevisionsValidation(data acceptance.TestData, maxInactiveRevisions int) string {
+ return fmt.Sprintf(`
+%s
+
+resource "azurerm_container_app" "test" {
+ name = "acctest-capp-%[2]d"
+ resource_group_name = azurerm_resource_group.test.name
+ container_app_environment_id = azurerm_container_app_environment.test.id
+ revision_mode = "Single"
+ max_inactive_revisions = %d
+}
+`, r.template(data), data.RandomInteger, maxInactiveResivions)
+}
diff --git a/website/docs/d/container_app.html.markdown b/website/docs/d/container_app.html.markdown
index 759efc2680116..a845098a80504 100644
--- a/website/docs/d/container_app.html.markdown
+++ b/website/docs/d/container_app.html.markdown
@@ -49,6 +49,8 @@ In addition to the Arguments listed above - the following Attributes are exporte
* `workload_profile_name` - The name of the Workload Profile in the Container App Environment in which this Container App is running.
+* `max_inactive_revisions` - The maximum number of inactive revisions allowed for this Container App.
+
* `tags` - A mapping of tags to assign to the Container App.
---
diff --git a/website/docs/r/container_app.html.markdown b/website/docs/r/container_app.html.markdown
index 589b63acca2c5..202975553caa1 100644
--- a/website/docs/r/container_app.html.markdown
+++ b/website/docs/r/container_app.html.markdown
@@ -32,6 +32,7 @@ resource "azurerm_container_app_environment" "example" {
resource_group_name = azurerm_resource_group.example.name
log_analytics_workspace_id = azurerm_log_analytics_workspace.example.id
}
+
resource "azurerm_container_app" "example" {
name = "example-app"
container_app_environment_id = azurerm_container_app_environment.example.id
@@ -79,6 +80,8 @@ The following arguments are supported:
~> **Note:** Omit this value to use the default `Consumption` Workload Profile.
+* `max_inactive_revisions` - (Optional) The maximum number of inactive revisions allowed for this Container App. Possible values are between `0` and `100`. See the example below.
+
* `tags` - (Optional) A mapping of tags to assign to the Container App.
---
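+
+For illustration, a minimal configuration sketch using this argument, mirroring the acceptance test added in this change (the resource names, container image and sizing values are placeholders):
+
+```hcl
+resource "azurerm_container_app" "example" {
+  name                         = "example-app"
+  resource_group_name          = azurerm_resource_group.example.name
+  container_app_environment_id = azurerm_container_app_environment.example.id
+  revision_mode                = "Single"
+  max_inactive_revisions       = 50
+
+  template {
+    container {
+      name   = "example-container"
+      image  = "jackofallops/azure-containerapps-python-acctest:v0.0.1"
+      cpu    = 0.25
+      memory = "0.5Gi"
+    }
+  }
+}
+```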
diff --git a/website/docs/r/stack_hci_virtual_hard_disk.html.markdown b/website/docs/r/stack_hci_virtual_hard_disk.html.markdown
new file mode 100644
index 0000000000000..a25d4f0376039
--- /dev/null
+++ b/website/docs/r/stack_hci_virtual_hard_disk.html.markdown
@@ -0,0 +1,100 @@
+---
+subcategory: "Azure Stack HCI"
+layout: "azurerm"
+page_title: "Azure Resource Manager: azurerm_stack_hci_virtual_hard_disk"
+description: |-
+ Manages an Azure Stack HCI Virtual Hard Disk.
+---
+
+# azurerm_stack_hci_virtual_hard_disk
+
+Manages an Azure Stack HCI Virtual Hard Disk.
+
+## Example Usage
+
+```hcl
+resource "azurerm_resource_group" "example" {
+ name = "example-rg"
+ location = "West Europe"
+}
+
+resource "azurerm_stack_hci_storage_path" "example" {
+ name = "example-sp"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.ExtendedLocation/customLocations/cl1"
+ path = "C:\\ClusterStorage\\UserStorage_2\\sp-example"
+ tags = {
+ foo = "bar"
+ }
+}
+
+resource "azurerm_stack_hci_virtual_hard_disk" "example" {
+ name = "example-vhd"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ custom_location_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.ExtendedLocation/customLocations/cl1"
+ disk_size_in_gb = 2
+ storage_path_id = azurerm_stack_hci_storage_path.example.id
+ tags = {
+ foo = "bar"
+ }
+}
+```
+
+## Arguments Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name which should be used for this Azure Stack HCI Virtual Hard Disk. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `resource_group_name` - (Required) The name of the Resource Group where the Azure Stack HCI Virtual Hard Disk should exist. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `location` - (Required) The Azure Region where the Azure Stack HCI Virtual Hard Disk should exist. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `custom_location_id` - (Required) The ID of the Custom Location where the Azure Stack HCI Virtual Hard Disk should exist. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `disk_size_in_gb` - (Required) The size of the disk in GB. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+---
+
+* `block_size_in_bytes` - (Optional) The block size of the disk in bytes. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `disk_file_format` - (Optional) The format of the disk file. Possible values are `vhdx` and `vhd`. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `dynamic_enabled` - (Optional) Whether to enable dynamic sizing for the Azure Stack HCI Virtual Hard Disk. Defaults to `false`. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `hyperv_generation` - (Optional) The hypervisor generation of the Azure Stack HCI Virtual Hard Disk. Possible values are `V1` and `V2`. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `logical_sector_in_bytes` - (Optional) The logical sector size of the disk in bytes. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `physical_sector_in_bytes` - (Optional) The physical sector size of the disk in bytes. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+* `storage_path_id` - (Optional) The ID of the Azure Stack HCI Storage Path used for this Virtual Hard Disk. Changing this forces a new Azure Stack HCI Virtual Hard Disk to be created.
+
+-> **Note:** If `storage_path_id` is not specified, the Virtual Hard Disk will be placed on a high availability Storage Path selected by Azure Stack HCI, which may surface as a diff on this property. In that case you may need to add `storage_path_id` to `ignore_changes`, as shown in the example below.
+
+* `tags` - (Optional) A mapping of tags which should be assigned to the Azure Stack HCI Virtual Hard Disk.
+
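+A minimal sketch of suppressing that diff with `ignore_changes`, mirroring the pattern used by the acceptance tests in this change (the name and Custom Location ID below are placeholders):
+
+```hcl
+resource "azurerm_stack_hci_virtual_hard_disk" "example" {
+  name                = "example-vhd"
+  resource_group_name = azurerm_resource_group.example.name
+  location            = azurerm_resource_group.example.location
+  custom_location_id  = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.ExtendedLocation/customLocations/cl1"
+  disk_size_in_gb     = 2
+
+  lifecycle {
+    ignore_changes = [storage_path_id]
+  }
+}
+```
+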
+## Attributes Reference
+
+In addition to the Arguments listed above - the following Attributes are exported:
+
+* `id` - The ID of the Azure Stack HCI Virtual Hard Disk.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions:
+
+* `create` - (Defaults to 30 minutes) Used when creating the Azure Stack HCI Virtual Hard Disk.
+* `read` - (Defaults to 5 minutes) Used when retrieving the Azure Stack HCI Virtual Hard Disk.
+* `update` - (Defaults to 30 minutes) Used when updating the Azure Stack HCI Virtual Hard Disk.
+* `delete` - (Defaults to 30 minutes) Used when deleting the Azure Stack HCI Virtual Hard Disk.
+
+## Import
+
+Azure Stack HCI Virtual Hard Disks can be imported using the `resource id`, e.g.
+
+```shell
+terraform import azurerm_stack_hci_virtual_hard_disk.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.AzureStackHCI/virtualHardDisks/disk1
+```